1 /*
2  * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #include <stdarg.h>
28 
29 #include "util/u_math.h"
30 #include "util/u_memory.h"
31 #include "util/u_string.h"
32 
33 #include "ir3_compiler.h"
34 #include "ir3_image.h"
35 #include "ir3_nir.h"
36 #include "ir3_shader.h"
37 
38 #include "instr-a3xx.h"
39 #include "ir3.h"
40 #include "ir3_context.h"
41 
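/* Propagate NIR's non-uniform access flag onto the ir3 instruction, so later
 * passes know the resource index may not be dynamically uniform.
 */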
42 void
43 ir3_handle_nonuniform(struct ir3_instruction *instr,
44                       nir_intrinsic_instr *intrin)
45 {
46    if (nir_intrinsic_has_access(intrin) &&
47        (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)) {
48       instr->flags |= IR3_INSTR_NONUNIF;
49    }
50 }
51 
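/* If a cat6 instruction's resource comes from a bindless handle, mark it
 * bindless and record the descriptor set as its base.
 */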
52 void
53 ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc)
54 {
55    nir_intrinsic_instr *intrin = ir3_bindless_resource(rsrc);
56    if (!intrin)
57       return;
58 
59    instr->flags |= IR3_INSTR_B;
60    instr->cat6.base = nir_intrinsic_desc_set(intrin);
61 }
62 
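/* Create a meta:input instruction writing the components in compmask; sysval
 * is initialized to ~0 (not a system value).
 */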
63 static struct ir3_instruction *
64 create_input(struct ir3_context *ctx, unsigned compmask)
65 {
66    struct ir3_instruction *in;
67 
68    in = ir3_instr_create(ctx->in_block, OPC_META_INPUT, 1, 0);
69    in->input.sysval = ~0;
70    __ssa_dst(in)->wrmask = compmask;
71 
72    array_insert(ctx->ir, ctx->ir->inputs, in);
73 
74    return in;
75 }
76 
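/* Emit the load of a single fragment-shader input component: bary.f when
 * interpolation coords are provided, flat.b/ldlv for flat-shaded inputs on
 * flat-bypass hardware, otherwise bary.f with the per-pixel ij.
 */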
77 static struct ir3_instruction *
78 create_frag_input(struct ir3_context *ctx, struct ir3_instruction *coord,
79                   unsigned n)
80 {
81    struct ir3_block *block = ctx->block;
82    struct ir3_instruction *instr;
83    /* packed inloc is fixed up later: */
84    struct ir3_instruction *inloc = create_immed(block, n);
85 
86    if (coord) {
87       instr = ir3_BARY_F(block, inloc, 0, coord, 0);
88    } else if (ctx->compiler->flat_bypass) {
89       if (ctx->compiler->gen >= 6) {
90          instr = ir3_FLAT_B(block, inloc, 0, inloc, 0);
91       } else {
92          instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
93          instr->cat6.type = TYPE_U32;
94          instr->cat6.iim_val = 1;
95       }
96    } else {
97       instr = ir3_BARY_F(block, inloc, 0, ctx->ij[IJ_PERSP_PIXEL], 0);
98       instr->srcs[1]->wrmask = 0x3;
99    }
100 
101    return instr;
102 }
103 
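/* Load a single driver-param component from the const file, at the offset
 * reserved for driver params in the const state.
 */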
104 static struct ir3_instruction *
105 create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
106 {
107    /* first four vec4 sysval's reserved for UBOs: */
108    /* NOTE: dp is in scalar, but there can be >4 dp components: */
109    struct ir3_const_state *const_state = ir3_const_state(ctx->so);
110    unsigned n = const_state->offsets.driver_param;
111    unsigned r = regid(n + dp / 4, dp % 4);
112    return create_uniform(ctx->block, r);
113 }
114 
115 /*
116  * Adreno's comparisons produce a 1 for true and 0 for false, in either 16 or
117  * 32-bit registers.  We use NIR's 1-bit integers to represent bools, and
118  * trust that we will only see and/or/xor on those 1-bit values, so we can
119  * safely store NIR i1s in a 32-bit reg while always containing either a 1 or
120  * 0.
121  */
122 
123 /*
124  * alu/sfu instructions:
125  */
126 
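/* Emit a cov (conversion) instruction for a NIR conversion op, mapping the
 * NIR source/destination types onto ir3 types and rounding modes.
 */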
127 static struct ir3_instruction *
128 create_cov(struct ir3_context *ctx, struct ir3_instruction *src,
129            unsigned src_bitsize, nir_op op)
130 {
131    type_t src_type, dst_type;
132 
133    switch (op) {
134    case nir_op_f2f32:
135    case nir_op_f2f16_rtne:
136    case nir_op_f2f16_rtz:
137    case nir_op_f2f16:
138    case nir_op_f2i32:
139    case nir_op_f2i16:
140    case nir_op_f2i8:
141    case nir_op_f2u32:
142    case nir_op_f2u16:
143    case nir_op_f2u8:
144       switch (src_bitsize) {
145       case 32:
146          src_type = TYPE_F32;
147          break;
148       case 16:
149          src_type = TYPE_F16;
150          break;
151       default:
152          ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
153       }
154       break;
155 
156    case nir_op_i2f32:
157    case nir_op_i2f16:
158    case nir_op_i2i32:
159    case nir_op_i2i16:
160    case nir_op_i2i8:
161       switch (src_bitsize) {
162       case 32:
163          src_type = TYPE_S32;
164          break;
165       case 16:
166          src_type = TYPE_S16;
167          break;
168       case 8:
169          src_type = TYPE_S8;
170          break;
171       default:
172          ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
173       }
174       break;
175 
176    case nir_op_u2f32:
177    case nir_op_u2f16:
178    case nir_op_u2u32:
179    case nir_op_u2u16:
180    case nir_op_u2u8:
181       switch (src_bitsize) {
182       case 32:
183          src_type = TYPE_U32;
184          break;
185       case 16:
186          src_type = TYPE_U16;
187          break;
188       case 8:
189          src_type = TYPE_U8;
190          break;
191       default:
192          ir3_context_error(ctx, "invalid src bit size: %u", src_bitsize);
193       }
194       break;
195 
196    case nir_op_b2f16:
197    case nir_op_b2f32:
198    case nir_op_b2i8:
199    case nir_op_b2i16:
200    case nir_op_b2i32:
201       src_type = ctx->compiler->bool_type;
202       break;
203 
204    default:
205       ir3_context_error(ctx, "invalid conversion op: %u", op);
206    }
207 
208    switch (op) {
209    case nir_op_f2f32:
210    case nir_op_i2f32:
211    case nir_op_u2f32:
212    case nir_op_b2f32:
213       dst_type = TYPE_F32;
214       break;
215 
216    case nir_op_f2f16_rtne:
217    case nir_op_f2f16_rtz:
218    case nir_op_f2f16:
219    case nir_op_i2f16:
220    case nir_op_u2f16:
221    case nir_op_b2f16:
222       dst_type = TYPE_F16;
223       break;
224 
225    case nir_op_f2i32:
226    case nir_op_i2i32:
227    case nir_op_b2i32:
228       dst_type = TYPE_S32;
229       break;
230 
231    case nir_op_f2i16:
232    case nir_op_i2i16:
233    case nir_op_b2i16:
234       dst_type = TYPE_S16;
235       break;
236 
237    case nir_op_f2i8:
238    case nir_op_i2i8:
239    case nir_op_b2i8:
240       dst_type = TYPE_S8;
241       break;
242 
243    case nir_op_f2u32:
244    case nir_op_u2u32:
245       dst_type = TYPE_U32;
246       break;
247 
248    case nir_op_f2u16:
249    case nir_op_u2u16:
250       dst_type = TYPE_U16;
251       break;
252 
253    case nir_op_f2u8:
254    case nir_op_u2u8:
255       dst_type = TYPE_U8;
256       break;
257 
258    default:
259       ir3_context_error(ctx, "invalid conversion op: %u", op);
260    }
261 
262    if (src_type == dst_type)
263       return src;
264 
265    struct ir3_instruction *cov = ir3_COV(ctx->block, src, src_type, dst_type);
266 
267    if (op == nir_op_f2f16_rtne) {
268       cov->cat1.round = ROUND_EVEN;
269    } else if (op == nir_op_f2f16) {
270       unsigned execution_mode = ctx->s->info.float_controls_execution_mode;
271       nir_rounding_mode rounding_mode =
272          nir_get_rounding_mode_from_float_controls(execution_mode,
273                                                    nir_type_float16);
274       if (rounding_mode == nir_rounding_mode_rtne)
275          cov->cat1.round = ROUND_EVEN;
276    }
277 
278    return cov;
279 }
280 
281 /* For shift instructions NIR always has shift amount as 32 bit integer */
282 static struct ir3_instruction *
283 resize_shift_amount(struct ir3_context *ctx, struct ir3_instruction *src,
284                     unsigned bs)
285 {
286    if (bs != 16)
287       return src;
288 
289    return ir3_COV(ctx->block, src, TYPE_U32, TYPE_U16);
290 }
291 
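/* Lower the nir [su]dot_4x8 ops to a single dp4acc instruction (plus an
 * explicit saturating add for the unsigned saturating variant, see below).
 */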
292 static void
293 emit_alu_dot_4x8_as_dp4acc(struct ir3_context *ctx, nir_alu_instr *alu,
294                            struct ir3_instruction **dst,
295                            struct ir3_instruction **src)
296 {
297    struct ir3_instruction *accumulator = NULL;
298    if (alu->op == nir_op_udot_4x8_uadd_sat) {
299       accumulator = create_immed(ctx->block, 0);
300    } else {
301       accumulator = src[2];
302    }
303 
304    dst[0] = ir3_DP4ACC(ctx->block, src[0], 0, src[1], 0, accumulator, 0);
305 
306    if (alu->op == nir_op_udot_4x8_uadd ||
307        alu->op == nir_op_udot_4x8_uadd_sat) {
308       dst[0]->cat3.signedness = IR3_SRC_UNSIGNED;
309    } else {
310       dst[0]->cat3.signedness = IR3_SRC_MIXED;
311    }
312 
313    /* For some reason (sat) doesn't work in the unsigned case, so
314     * we have to emulate it.
315     */
316    if (alu->op == nir_op_udot_4x8_uadd_sat) {
317       dst[0] = ir3_ADD_U(ctx->block, dst[0], 0, src[2], 0);
318       dst[0]->flags |= IR3_INSTR_SAT;
319    } else if (alu->op == nir_op_sudot_4x8_iadd_sat) {
320       dst[0]->flags |= IR3_INSTR_SAT;
321    }
322 }
323 
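/* Fallback lowering of the [su]dot_4x8 ops for hardware with only dp2acc:
 * accumulate the packed low and high halves with two dp2acc instructions.
 */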
324 static void
325 emit_alu_dot_4x8_as_dp2acc(struct ir3_context *ctx, nir_alu_instr *alu,
326                            struct ir3_instruction **dst,
327                            struct ir3_instruction **src)
328 {
329    int signedness;
330    if (alu->op == nir_op_udot_4x8_uadd ||
331        alu->op == nir_op_udot_4x8_uadd_sat) {
332       signedness = IR3_SRC_UNSIGNED;
333    } else {
334       signedness = IR3_SRC_MIXED;
335    }
336 
337    struct ir3_instruction *accumulator = NULL;
338    if (alu->op == nir_op_udot_4x8_uadd_sat ||
339        alu->op == nir_op_sudot_4x8_iadd_sat) {
340       accumulator = create_immed(ctx->block, 0);
341    } else {
342       accumulator = src[2];
343    }
344 
345    dst[0] = ir3_DP2ACC(ctx->block, src[0], 0, src[1], 0, accumulator, 0);
346    dst[0]->cat3.packed = IR3_SRC_PACKED_LOW;
347    dst[0]->cat3.signedness = signedness;
348 
349    dst[0] = ir3_DP2ACC(ctx->block, src[0], 0, src[1], 0, dst[0], 0);
350    dst[0]->cat3.packed = IR3_SRC_PACKED_HIGH;
351    dst[0]->cat3.signedness = signedness;
352 
353    if (alu->op == nir_op_udot_4x8_uadd_sat) {
354       dst[0] = ir3_ADD_U(ctx->block, dst[0], 0, src[2], 0);
355       dst[0]->flags |= IR3_INSTR_SAT;
356    } else if (alu->op == nir_op_sudot_4x8_iadd_sat) {
357       dst[0] = ir3_ADD_S(ctx->block, dst[0], 0, src[2], 0);
358       dst[0]->flags |= IR3_INSTR_SAT;
359    }
360 }
361 
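/* Translate a single NIR ALU instruction into ir3 instruction(s). */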
362 static void
363 emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
364 {
365    const nir_op_info *info = &nir_op_infos[alu->op];
366    struct ir3_instruction **dst, *src[info->num_inputs];
367    unsigned bs[info->num_inputs]; /* bit size */
368    struct ir3_block *b = ctx->block;
369    unsigned dst_sz, wrmask;
370    type_t dst_type = type_uint_size(nir_dest_bit_size(alu->dest.dest));
371 
372    if (alu->dest.dest.is_ssa) {
373       dst_sz = alu->dest.dest.ssa.num_components;
374       wrmask = (1 << dst_sz) - 1;
375    } else {
376       dst_sz = alu->dest.dest.reg.reg->num_components;
377       wrmask = alu->dest.write_mask;
378    }
379 
380    dst = ir3_get_dst(ctx, &alu->dest.dest, dst_sz);
381 
382    /* Vectors are special in that they have non-scalarized writemasks,
383     * and just take the first swizzle channel of each argument, in
384     * order, as the corresponding writemask channel.
385     */
386    if ((alu->op == nir_op_vec2) || (alu->op == nir_op_vec3) ||
387        (alu->op == nir_op_vec4) || (alu->op == nir_op_vec8) ||
388        (alu->op == nir_op_vec16)) {
389 
390       for (int i = 0; i < info->num_inputs; i++) {
391          nir_alu_src *asrc = &alu->src[i];
392 
393          compile_assert(ctx, !asrc->abs);
394          compile_assert(ctx, !asrc->negate);
395 
396          src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
397          if (!src[i])
398             src[i] = create_immed_typed(ctx->block, 0, dst_type);
399          dst[i] = ir3_MOV(b, src[i], dst_type);
400       }
401 
402       ir3_put_dst(ctx, &alu->dest.dest);
403       return;
404    }
405 
406    /* We also get mov's with more than one component, so handle those
407     * specially:
408     */
409    if (alu->op == nir_op_mov) {
410       nir_alu_src *asrc = &alu->src[0];
411       struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);
412 
413       for (unsigned i = 0; i < dst_sz; i++) {
414          if (wrmask & (1 << i)) {
415             dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type);
416          } else {
417             dst[i] = NULL;
418          }
419       }
420 
421       ir3_put_dst(ctx, &alu->dest.dest);
422       return;
423    }
424 
425    /* General case: We can just grab the one used channel per src. */
426    for (int i = 0; i < info->num_inputs; i++) {
427       unsigned chan = ffs(alu->dest.write_mask) - 1;
428       nir_alu_src *asrc = &alu->src[i];
429 
430       compile_assert(ctx, !asrc->abs);
431       compile_assert(ctx, !asrc->negate);
432 
433       src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[chan]];
434       bs[i] = nir_src_bit_size(asrc->src);
435 
436       compile_assert(ctx, src[i]);
437    }
438 
439    switch (alu->op) {
440    case nir_op_f2f32:
441    case nir_op_f2f16_rtne:
442    case nir_op_f2f16_rtz:
443    case nir_op_f2f16:
444    case nir_op_f2i32:
445    case nir_op_f2i16:
446    case nir_op_f2i8:
447    case nir_op_f2u32:
448    case nir_op_f2u16:
449    case nir_op_f2u8:
450    case nir_op_i2f32:
451    case nir_op_i2f16:
452    case nir_op_i2i32:
453    case nir_op_i2i16:
454    case nir_op_i2i8:
455    case nir_op_u2f32:
456    case nir_op_u2f16:
457    case nir_op_u2u32:
458    case nir_op_u2u16:
459    case nir_op_u2u8:
460    case nir_op_b2f16:
461    case nir_op_b2f32:
462    case nir_op_b2i8:
463    case nir_op_b2i16:
464    case nir_op_b2i32:
465       dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
466       break;
467 
468    case nir_op_fquantize2f16:
469       dst[0] = create_cov(ctx, create_cov(ctx, src[0], 32, nir_op_f2f16_rtne),
470                           16, nir_op_f2f32);
471       break;
472    case nir_op_f2b1:
473       dst[0] = ir3_CMPS_F(
474          b, src[0], 0,
475          create_immed_typed(b, 0, type_float_size(bs[0])), 0);
476       dst[0]->cat2.condition = IR3_COND_NE;
477       break;
478 
479    case nir_op_i2b1:
480       /* i2b1 will appear when translating from nir_intrinsic_load_ubo or
481        * nir_intrinsic_load_ssbo, where any non-zero value is true.
482        */
483       dst[0] = ir3_CMPS_S(
484          b, src[0], 0,
485          create_immed_typed(b, 0, type_uint_size(bs[0])), 0);
486       dst[0]->cat2.condition = IR3_COND_NE;
487       break;
488 
489    case nir_op_b2b1:
490       /* b2b1 will appear when translating from
491        *
492        * - nir_intrinsic_load_shared of a 32-bit 0/~0 value.
493        * - nir_intrinsic_load_constant of a 32-bit 0/~0 value
494        *
495        * A negate can turn those into a 1 or 0 for us.
496        */
497       dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
498       break;
499 
500    case nir_op_b2b32:
501       /* b2b32 will appear when converting our 1-bit bools to a store_shared
502        * argument.
503        *
504        * A negate can turn those into a ~0 for us.
505        */
506       dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
507       break;
508 
509    case nir_op_fneg:
510       dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FNEG);
511       break;
512    case nir_op_fabs:
513       dst[0] = ir3_ABSNEG_F(b, src[0], IR3_REG_FABS);
514       break;
515    case nir_op_fmax:
516       dst[0] = ir3_MAX_F(b, src[0], 0, src[1], 0);
517       break;
518    case nir_op_fmin:
519       dst[0] = ir3_MIN_F(b, src[0], 0, src[1], 0);
520       break;
521    case nir_op_fsat:
522       /* if there is just a single use of the src, and it supports the
523        * (sat) bit, we can just fold the (sat) flag back into the
524        * src instruction and create a mov.  This is easier for cp
525        * to eliminate.
526        */
527       if (alu->src[0].src.is_ssa && is_sat_compatible(src[0]->opc) &&
528           (list_length(&alu->src[0].src.ssa->uses) == 1)) {
529          src[0]->flags |= IR3_INSTR_SAT;
530          dst[0] = ir3_MOV(b, src[0], dst_type);
531       } else {
532          /* otherwise generate a max.f that saturates.. blob does
533           * similar (generating a cat2 mov using max.f)
534           */
535          dst[0] = ir3_MAX_F(b, src[0], 0, src[0], 0);
536          dst[0]->flags |= IR3_INSTR_SAT;
537       }
538       break;
539    case nir_op_fmul:
540       dst[0] = ir3_MUL_F(b, src[0], 0, src[1], 0);
541       break;
542    case nir_op_fadd:
543       dst[0] = ir3_ADD_F(b, src[0], 0, src[1], 0);
544       break;
545    case nir_op_fsub:
546       dst[0] = ir3_ADD_F(b, src[0], 0, src[1], IR3_REG_FNEG);
547       break;
548    case nir_op_ffma:
549       dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
550       break;
551    case nir_op_fddx:
552    case nir_op_fddx_coarse:
553       dst[0] = ir3_DSX(b, src[0], 0);
554       dst[0]->cat5.type = TYPE_F32;
555       break;
556    case nir_op_fddx_fine:
557       dst[0] = ir3_DSXPP_MACRO(b, src[0], 0);
558       dst[0]->cat5.type = TYPE_F32;
559       break;
560    case nir_op_fddy:
561    case nir_op_fddy_coarse:
562       dst[0] = ir3_DSY(b, src[0], 0);
563       dst[0]->cat5.type = TYPE_F32;
564       break;
566    case nir_op_fddy_fine:
567       dst[0] = ir3_DSYPP_MACRO(b, src[0], 0);
568       dst[0]->cat5.type = TYPE_F32;
569       break;
570    case nir_op_flt:
571       dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
572       dst[0]->cat2.condition = IR3_COND_LT;
573       break;
574    case nir_op_fge:
575       dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
576       dst[0]->cat2.condition = IR3_COND_GE;
577       break;
578    case nir_op_feq:
579       dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
580       dst[0]->cat2.condition = IR3_COND_EQ;
581       break;
582    case nir_op_fneu:
583       dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
584       dst[0]->cat2.condition = IR3_COND_NE;
585       break;
586    case nir_op_fceil:
587       dst[0] = ir3_CEIL_F(b, src[0], 0);
588       break;
589    case nir_op_ffloor:
590       dst[0] = ir3_FLOOR_F(b, src[0], 0);
591       break;
592    case nir_op_ftrunc:
593       dst[0] = ir3_TRUNC_F(b, src[0], 0);
594       break;
595    case nir_op_fround_even:
596       dst[0] = ir3_RNDNE_F(b, src[0], 0);
597       break;
598    case nir_op_fsign:
599       dst[0] = ir3_SIGN_F(b, src[0], 0);
600       break;
601 
602    case nir_op_fsin:
603       dst[0] = ir3_SIN(b, src[0], 0);
604       break;
605    case nir_op_fcos:
606       dst[0] = ir3_COS(b, src[0], 0);
607       break;
608    case nir_op_frsq:
609       dst[0] = ir3_RSQ(b, src[0], 0);
610       break;
611    case nir_op_frcp:
612       dst[0] = ir3_RCP(b, src[0], 0);
613       break;
614    case nir_op_flog2:
615       dst[0] = ir3_LOG2(b, src[0], 0);
616       break;
617    case nir_op_fexp2:
618       dst[0] = ir3_EXP2(b, src[0], 0);
619       break;
620    case nir_op_fsqrt:
621       dst[0] = ir3_SQRT(b, src[0], 0);
622       break;
623 
624    case nir_op_iabs:
625       dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SABS);
626       break;
627    case nir_op_iadd:
628       dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
629       break;
630    case nir_op_ihadd:
631       dst[0] = ir3_ADD_S(b, src[0], 0, src[1], 0);
632       dst[0]->dsts[0]->flags |= IR3_REG_EI;
633       break;
634    case nir_op_uhadd:
635       dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
636       dst[0]->dsts[0]->flags |= IR3_REG_EI;
637       break;
638    case nir_op_iand:
639       dst[0] = ir3_AND_B(b, src[0], 0, src[1], 0);
640       break;
641    case nir_op_imax:
642       dst[0] = ir3_MAX_S(b, src[0], 0, src[1], 0);
643       break;
644    case nir_op_umax:
645       dst[0] = ir3_MAX_U(b, src[0], 0, src[1], 0);
646       break;
647    case nir_op_imin:
648       dst[0] = ir3_MIN_S(b, src[0], 0, src[1], 0);
649       break;
650    case nir_op_umin:
651       dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
652       break;
653    case nir_op_umul_low:
654       dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
655       break;
656    case nir_op_imadsh_mix16:
657       dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0);
658       break;
659    case nir_op_imad24_ir3:
660       dst[0] = ir3_MAD_S24(b, src[0], 0, src[1], 0, src[2], 0);
661       break;
662    case nir_op_imul:
663       compile_assert(ctx, nir_dest_bit_size(alu->dest.dest) == 16);
664       dst[0] = ir3_MUL_S24(b, src[0], 0, src[1], 0);
665       break;
666    case nir_op_imul24:
667       dst[0] = ir3_MUL_S24(b, src[0], 0, src[1], 0);
668       break;
669    case nir_op_ineg:
670       dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
671       break;
672    case nir_op_inot:
673       if (bs[0] == 1) {
674          struct ir3_instruction *one =
675                create_immed_typed(ctx->block, 1, ctx->compiler->bool_type);
676          dst[0] = ir3_SUB_U(b, one, 0, src[0], 0);
677       } else {
678          dst[0] = ir3_NOT_B(b, src[0], 0);
679       }
680       break;
681    case nir_op_ior:
682       dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0);
683       break;
684    case nir_op_ishl:
685       dst[0] =
686          ir3_SHL_B(b, src[0], 0, resize_shift_amount(ctx, src[1], bs[0]), 0);
687       break;
688    case nir_op_ishr:
689       dst[0] =
690          ir3_ASHR_B(b, src[0], 0, resize_shift_amount(ctx, src[1], bs[0]), 0);
691       break;
692    case nir_op_isub:
693       dst[0] = ir3_SUB_U(b, src[0], 0, src[1], 0);
694       break;
695    case nir_op_ixor:
696       dst[0] = ir3_XOR_B(b, src[0], 0, src[1], 0);
697       break;
698    case nir_op_ushr:
699       dst[0] =
700          ir3_SHR_B(b, src[0], 0, resize_shift_amount(ctx, src[1], bs[0]), 0);
701       break;
702    case nir_op_ilt:
703       dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
704       dst[0]->cat2.condition = IR3_COND_LT;
705       break;
706    case nir_op_ige:
707       dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
708       dst[0]->cat2.condition = IR3_COND_GE;
709       break;
710    case nir_op_ieq:
711       dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
712       dst[0]->cat2.condition = IR3_COND_EQ;
713       break;
714    case nir_op_ine:
715       dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
716       dst[0]->cat2.condition = IR3_COND_NE;
717       break;
718    case nir_op_ult:
719       dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
720       dst[0]->cat2.condition = IR3_COND_LT;
721       break;
722    case nir_op_uge:
723       dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
724       dst[0]->cat2.condition = IR3_COND_GE;
725       break;
726 
727    case nir_op_bcsel: {
728       struct ir3_instruction *cond = src[0];
729 
730       /* If src[0] is a negation (likely as a result of an ir3_b2n(cond)),
731        * we can ignore that and use original cond, since the nonzero-ness of
732        * cond stays the same.
733        */
734       if (cond->opc == OPC_ABSNEG_S && cond->flags == 0 &&
735           (cond->srcs[0]->flags & (IR3_REG_SNEG | IR3_REG_SABS)) ==
736              IR3_REG_SNEG) {
737          cond = cond->srcs[0]->def->instr;
738       }
739 
740       compile_assert(ctx, bs[1] == bs[2]);
741 
742       /* The condition's size has to match the other two arguments' size, so
743        * convert down if necessary.
744        *
745        * Single hashtable is fine, because the conversion will either be
746        * 16->32 or 32->16, but never both
747        */
748       if (is_half(src[1]) != is_half(cond)) {
749          struct hash_entry *prev_entry =
750             _mesa_hash_table_search(ctx->sel_cond_conversions, src[0]);
751          if (prev_entry) {
752             cond = prev_entry->data;
753          } else {
754             if (is_half(cond)) {
755                cond = ir3_COV(b, cond, TYPE_U16, TYPE_U32);
756             } else {
757                cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
758             }
759             _mesa_hash_table_insert(ctx->sel_cond_conversions, src[0], cond);
760          }
761       }
762 
763       if (is_half(src[1])) {
764          dst[0] = ir3_SEL_B16(b, src[1], 0, cond, 0, src[2], 0);
765       } else {
766          dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
767       }
768 
769       break;
770    }
771    case nir_op_bit_count: {
772       if (ctx->compiler->gen < 5) {
773          dst[0] = ir3_CBITS_B(b, src[0], 0);
774          break;
775       }
776 
777       // We need to do this 16b at a time on a5xx+a6xx.  Once half-precision
778       // support is in place, this should probably move to a NIR lowering pass:
779       struct ir3_instruction *hi, *lo;
780 
781       hi = ir3_COV(b, ir3_SHR_B(b, src[0], 0, create_immed(b, 16), 0), TYPE_U32,
782                    TYPE_U16);
783       lo = ir3_COV(b, src[0], TYPE_U32, TYPE_U16);
784 
785       hi = ir3_CBITS_B(b, hi, 0);
786       lo = ir3_CBITS_B(b, lo, 0);
787 
788       // TODO maybe the builders should default to making dst half-precision
789       // if the src's were half precision, to make this less awkward.. otoh
790       // we should probably just do this lowering in NIR.
791       hi->dsts[0]->flags |= IR3_REG_HALF;
792       lo->dsts[0]->flags |= IR3_REG_HALF;
793 
794       dst[0] = ir3_ADD_S(b, hi, 0, lo, 0);
795       dst[0]->dsts[0]->flags |= IR3_REG_HALF;
796       dst[0] = ir3_COV(b, dst[0], TYPE_U16, TYPE_U32);
797       break;
798    }
799    case nir_op_ifind_msb: {
800       struct ir3_instruction *cmp;
801       dst[0] = ir3_CLZ_S(b, src[0], 0);
802       cmp = ir3_CMPS_S(b, dst[0], 0, create_immed(b, 0), 0);
803       cmp->cat2.condition = IR3_COND_GE;
804       dst[0] = ir3_SEL_B32(b, ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0),
805                            0, cmp, 0, dst[0], 0);
806       break;
807    }
808    case nir_op_ufind_msb:
809       dst[0] = ir3_CLZ_B(b, src[0], 0);
810       dst[0] = ir3_SEL_B32(b, ir3_SUB_U(b, create_immed(b, 31), 0, dst[0], 0),
811                            0, src[0], 0, dst[0], 0);
812       break;
813    case nir_op_find_lsb:
814       dst[0] = ir3_BFREV_B(b, src[0], 0);
815       dst[0] = ir3_CLZ_B(b, dst[0], 0);
816       break;
817    case nir_op_bitfield_reverse:
818       dst[0] = ir3_BFREV_B(b, src[0], 0);
819       break;
820 
821    case nir_op_uadd_sat:
822       dst[0] = ir3_ADD_U(b, src[0], 0, src[1], 0);
823       dst[0]->flags |= IR3_INSTR_SAT;
824       break;
825    case nir_op_iadd_sat:
826       dst[0] = ir3_ADD_S(b, src[0], 0, src[1], 0);
827       dst[0]->flags |= IR3_INSTR_SAT;
828       break;
829 
830    case nir_op_udot_4x8_uadd:
831    case nir_op_udot_4x8_uadd_sat:
832    case nir_op_sudot_4x8_iadd:
833    case nir_op_sudot_4x8_iadd_sat: {
834       if (ctx->compiler->has_dp4acc) {
835          emit_alu_dot_4x8_as_dp4acc(ctx, alu, dst, src);
836       } else if (ctx->compiler->has_dp2acc) {
837          emit_alu_dot_4x8_as_dp2acc(ctx, alu, dst, src);
838       } else {
839          ir3_context_error(ctx, "ALU op should have been lowered: %s\n",
840                            nir_op_infos[alu->op].name);
841       }
842 
843       break;
844    }
845 
846    default:
847       ir3_context_error(ctx, "Unhandled ALU op: %s\n",
848                         nir_op_infos[alu->op].name);
849       break;
850    }
851 
852    if (nir_alu_type_get_base_type(info->output_type) == nir_type_bool) {
853       assert(nir_dest_bit_size(alu->dest.dest) == 1 || alu->op == nir_op_b2b32);
854       assert(dst_sz == 1);
855    } else {
856       /* 1-bit values stored in 32-bit registers are only valid for certain
857        * ALU ops.
858        */
859       switch (alu->op) {
860       case nir_op_iand:
861       case nir_op_ior:
862       case nir_op_ixor:
863       case nir_op_inot:
864       case nir_op_bcsel:
865          break;
866       default:
867          compile_assert(ctx, nir_dest_bit_size(alu->dest.dest) != 1);
868       }
869    }
870 
871    ir3_put_dst(ctx, &alu->dest.dest);
872 }
873 
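/* UBO load via the ldc instruction (used for loads produced by
 * nir_lower_ubo_vec4, including the bindless case).
 */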
874 static void
875 emit_intrinsic_load_ubo_ldc(struct ir3_context *ctx, nir_intrinsic_instr *intr,
876                             struct ir3_instruction **dst)
877 {
878    struct ir3_block *b = ctx->block;
879 
880    /* This is only generated for us by nir_lower_ubo_vec4, which leaves base =
881     * 0.
882     */
883    assert(nir_intrinsic_base(intr) == 0);
884 
885    unsigned ncomp = intr->num_components;
886    struct ir3_instruction *offset = ir3_get_src(ctx, &intr->src[1])[0];
887    struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[0])[0];
888    struct ir3_instruction *ldc = ir3_LDC(b, idx, 0, offset, 0);
889    ldc->dsts[0]->wrmask = MASK(ncomp);
890    ldc->cat6.iim_val = ncomp;
891    ldc->cat6.d = nir_intrinsic_component(intr);
892    ldc->cat6.type = TYPE_U32;
893 
894    ir3_handle_bindless_cat6(ldc, intr->src[0]);
895    if (ldc->flags & IR3_INSTR_B)
896       ctx->so->bindless_ubo = true;
897    ir3_handle_nonuniform(ldc, intr);
898 
899    ir3_split_dest(b, dst, ldc, 0, ncomp);
900 }
901 
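/* Copy a range of a UBO into the const file with ldc.k, addressing the
 * destination consts via a1.x.
 */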
902 static void
903 emit_intrinsic_copy_ubo_to_uniform(struct ir3_context *ctx,
904                                    nir_intrinsic_instr *intr)
905 {
906    struct ir3_block *b = ctx->block;
907 
908    unsigned base = nir_intrinsic_base(intr);
909    unsigned size = nir_intrinsic_range(intr);
910 
911    struct ir3_instruction *addr1 = ir3_get_addr1(ctx, base);
912 
913    struct ir3_instruction *offset = ir3_get_src(ctx, &intr->src[1])[0];
914    struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[0])[0];
915    struct ir3_instruction *ldc = ir3_LDC_K(b, idx, 0, offset, 0);
916    ldc->cat6.iim_val = size;
917    ldc->barrier_class = ldc->barrier_conflict = IR3_BARRIER_CONST_W;
918 
919    ir3_handle_bindless_cat6(ldc, intr->src[0]);
920    if (ldc->flags & IR3_INSTR_B)
921       ctx->so->bindless_ubo = true;
922 
923    ir3_instr_set_address(ldc, addr1);
924 
925    array_insert(b, b->keeps, ldc);
926 }
927 
928 /* handles direct/indirect UBO reads: */
929 static void
930 emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
931                         struct ir3_instruction **dst)
932 {
933    struct ir3_block *b = ctx->block;
934    struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
935    const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
936    unsigned ubo = regid(const_state->offsets.ubo, 0);
937    const unsigned ptrsz = ir3_pointer_size(ctx->compiler);
938 
939    int off = 0;
940 
941    /* First src is ubo index, which could either be an immed or not: */
942    src0 = ir3_get_src(ctx, &intr->src[0])[0];
943    if (is_same_type_mov(src0) && (src0->srcs[0]->flags & IR3_REG_IMMED)) {
944       base_lo = create_uniform(b, ubo + (src0->srcs[0]->iim_val * ptrsz));
945       base_hi = create_uniform(b, ubo + (src0->srcs[0]->iim_val * ptrsz) + 1);
946    } else {
947       base_lo = create_uniform_indirect(b, ubo, TYPE_U32,
948                                         ir3_get_addr0(ctx, src0, ptrsz));
949       base_hi = create_uniform_indirect(b, ubo + 1, TYPE_U32,
950                                         ir3_get_addr0(ctx, src0, ptrsz));
951 
952       /* NOTE: since relative addressing is used, make sure constlen is
953        * at least big enough to cover all the UBO addresses, since the
954        * assembler won't know what the max address reg is.
955        */
956       ctx->so->constlen =
957          MAX2(ctx->so->constlen,
958               const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz));
959    }
960 
961    /* note: on 32-bit GPUs base_hi is ignored and DCE'd */
962    addr = base_lo;
963 
964    if (nir_src_is_const(intr->src[1])) {
965       off += nir_src_as_uint(intr->src[1]);
966    } else {
967       /* For load_ubo_indirect, second src is indirect offset: */
968       src1 = ir3_get_src(ctx, &intr->src[1])[0];
969 
970       /* and add offset to addr: */
971       addr = ir3_ADD_S(b, addr, 0, src1, 0);
972    }
973 
974    /* if offset is too large to encode in the ldg, split it out: */
975    if ((off + (intr->num_components * 4)) > 1024) {
976       /* split out the minimal amount to improve the odds that
977        * cp can fit the immediate in the add.s instruction:
978        */
979       unsigned off2 = off + (intr->num_components * 4) - 1024;
980       addr = ir3_ADD_S(b, addr, 0, create_immed(b, off2), 0);
981       off -= off2;
982    }
983 
984    if (ptrsz == 2) {
985       struct ir3_instruction *carry;
986 
987       /* handle 32b rollover, ie:
988        *   if (addr < base_lo)
989        *      base_hi++
990        */
991       carry = ir3_CMPS_U(b, addr, 0, base_lo, 0);
992       carry->cat2.condition = IR3_COND_LT;
993       base_hi = ir3_ADD_S(b, base_hi, 0, carry, 0);
994 
995       addr = ir3_collect(b, addr, base_hi);
996    }
997 
998    for (int i = 0; i < intr->num_components; i++) {
999       struct ir3_instruction *load =
1000          ir3_LDG(b, addr, 0, create_immed(b, off + i * 4), 0,
1001                  create_immed(b, 1), 0); /* num components */
1002       load->cat6.type = TYPE_U32;
1003       dst[i] = load;
1004    }
1005 }
1006 
1007 /* Load a kernel param: src[] = { address }. */
1008 static void
1009 emit_intrinsic_load_kernel_input(struct ir3_context *ctx,
1010                                  nir_intrinsic_instr *intr,
1011                                  struct ir3_instruction **dst)
1012 {
1013    const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
1014    struct ir3_block *b = ctx->block;
1015    unsigned offset = nir_intrinsic_base(intr);
1016    unsigned p = regid(const_state->offsets.kernel_params, 0);
1017 
1018    struct ir3_instruction *src0 = ir3_get_src(ctx, &intr->src[0])[0];
1019 
1020    if (is_same_type_mov(src0) && (src0->srcs[0]->flags & IR3_REG_IMMED)) {
1021       offset += src0->srcs[0]->iim_val;
1022 
1023       /* kernel param position is in bytes, but constant space is 32b registers: */
1024       compile_assert(ctx, !(offset & 0x3));
1025 
1026       dst[0] = create_uniform(b, p + (offset / 4));
1027    } else {
1028       /* kernel param position is in bytes, but constant space is 32b registers: */
1029       compile_assert(ctx, !(offset & 0x3));
1030 
1031       /* TODO we should probably be lowering this in nir, and also handling
1032        * non-32b inputs.. Also we probably don't want to be using
1033        * SP_MODE_CONTROL.CONSTANT_DEMOTION_ENABLE for KERNEL shaders..
1034        */
1035       src0 = ir3_SHR_B(b, src0, 0, create_immed(b, 2), 0);
1036 
1037       dst[0] = create_uniform_indirect(b, offset / 4, TYPE_U32,
1038                                        ir3_get_addr0(ctx, src0, 1));
1039    }
1040 }
1041 
1042 /* src[] = { block_index } */
1043 static void
1044 emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1045                          struct ir3_instruction **dst)
1046 {
1047    struct ir3_block *b = ctx->block;
1048    struct ir3_instruction *ibo = ir3_ssbo_to_ibo(ctx, intr->src[0]);
1049    struct ir3_instruction *resinfo = ir3_RESINFO(b, ibo, 0);
1050    resinfo->cat6.iim_val = 1;
1051    resinfo->cat6.d = ctx->compiler->gen >= 6 ? 1 : 2;
1052    resinfo->cat6.type = TYPE_U32;
1053    resinfo->cat6.typed = false;
1054    /* resinfo has no writemask and always writes out 3 components */
1055    resinfo->dsts[0]->wrmask = MASK(3);
1056    ir3_handle_bindless_cat6(resinfo, intr->src[0]);
1057    ir3_handle_nonuniform(resinfo, intr);
1058 
1059    if (ctx->compiler->gen >= 6) {
1060       ir3_split_dest(b, dst, resinfo, 0, 1);
1061    } else {
1062       /* On a5xx, resinfo returns the low 16 bits of ssbo size in .x and the high 16 bits in .y */
1063       struct ir3_instruction *resinfo_dst[2];
1064       ir3_split_dest(b, resinfo_dst, resinfo, 0, 2);
1065       *dst = ir3_ADD_U(b, ir3_SHL_B(b, resinfo_dst[1], 0, create_immed(b, 16), 0), 0, resinfo_dst[0], 0);
1066    }
1067 }
1068 
1069 /* src[] = { offset }. const_index[] = { base } */
1070 static void
1071 emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1072                            struct ir3_instruction **dst)
1073 {
1074    struct ir3_block *b = ctx->block;
1075    struct ir3_instruction *ldl, *offset;
1076    unsigned base;
1077 
1078    offset = ir3_get_src(ctx, &intr->src[0])[0];
1079    base = nir_intrinsic_base(intr);
1080 
1081    ldl = ir3_LDL(b, offset, 0, create_immed(b, base), 0,
1082                  create_immed(b, intr->num_components), 0);
1083 
1084    ldl->cat6.type = utype_dst(intr->dest);
1085    ldl->dsts[0]->wrmask = MASK(intr->num_components);
1086 
1087    ldl->barrier_class = IR3_BARRIER_SHARED_R;
1088    ldl->barrier_conflict = IR3_BARRIER_SHARED_W;
1089 
1090    ir3_split_dest(b, dst, ldl, 0, intr->num_components);
1091 }
1092 
1093 /* src[] = { value, offset }. const_index[] = { base, write_mask } */
1094 static void
1095 emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1096 {
1097    struct ir3_block *b = ctx->block;
1098    struct ir3_instruction *stl, *offset;
1099    struct ir3_instruction *const *value;
1100    unsigned base, wrmask, ncomp;
1101 
1102    value = ir3_get_src(ctx, &intr->src[0]);
1103    offset = ir3_get_src(ctx, &intr->src[1])[0];
1104 
1105    base = nir_intrinsic_base(intr);
1106    wrmask = nir_intrinsic_write_mask(intr);
1107    ncomp = ffs(~wrmask) - 1;
1108 
1109    assert(wrmask == BITFIELD_MASK(intr->num_components));
1110 
1111    stl = ir3_STL(b, offset, 0, ir3_create_collect(b, value, ncomp), 0,
1112                  create_immed(b, ncomp), 0);
1113    stl->cat6.dst_offset = base;
1114    stl->cat6.type = utype_src(intr->src[0]);
1115    stl->barrier_class = IR3_BARRIER_SHARED_W;
1116    stl->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1117 
1118    array_insert(b, b->keeps, stl);
1119 }
1120 
1121 /* src[] = { offset }. const_index[] = { base } */
1122 static void
1123 emit_intrinsic_load_shared_ir3(struct ir3_context *ctx,
1124                                nir_intrinsic_instr *intr,
1125                                struct ir3_instruction **dst)
1126 {
1127    struct ir3_block *b = ctx->block;
1128    struct ir3_instruction *load, *offset;
1129    unsigned base;
1130 
1131    offset = ir3_get_src(ctx, &intr->src[0])[0];
1132    base = nir_intrinsic_base(intr);
1133 
1134    load = ir3_LDLW(b, offset, 0, create_immed(b, base), 0,
1135                    create_immed(b, intr->num_components), 0);
1136 
1137    /* for a650, use LDL for tess ctrl inputs: */
1138    if (ctx->so->type == MESA_SHADER_TESS_CTRL && ctx->compiler->tess_use_shared)
1139       load->opc = OPC_LDL;
1140 
1141    load->cat6.type = utype_dst(intr->dest);
1142    load->dsts[0]->wrmask = MASK(intr->num_components);
1143 
1144    load->barrier_class = IR3_BARRIER_SHARED_R;
1145    load->barrier_conflict = IR3_BARRIER_SHARED_W;
1146 
1147    ir3_split_dest(b, dst, load, 0, intr->num_components);
1148 }
1149 
1150 /* src[] = { value, offset }. const_index[] = { base } */
1151 static void
1152 emit_intrinsic_store_shared_ir3(struct ir3_context *ctx,
1153                                 nir_intrinsic_instr *intr)
1154 {
1155    struct ir3_block *b = ctx->block;
1156    struct ir3_instruction *store, *offset;
1157    struct ir3_instruction *const *value;
1158 
1159    value = ir3_get_src(ctx, &intr->src[0]);
1160    offset = ir3_get_src(ctx, &intr->src[1])[0];
1161 
1162    store = ir3_STLW(b, offset, 0,
1163                     ir3_create_collect(b, value, intr->num_components), 0,
1164                     create_immed(b, intr->num_components), 0);
1165 
1166    /* for a650, use STL for vertex outputs used by tess ctrl shader: */
1167    if (ctx->so->type == MESA_SHADER_VERTEX && ctx->so->key.tessellation &&
1168        ctx->compiler->tess_use_shared)
1169       store->opc = OPC_STL;
1170 
1171    store->cat6.dst_offset = nir_intrinsic_base(intr);
1172    store->cat6.type = utype_src(intr->src[0]);
1173    store->barrier_class = IR3_BARRIER_SHARED_W;
1174    store->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1175 
1176    array_insert(b, b->keeps, store);
1177 }
1178 
1179 /*
1180  * CS shared variable atomic intrinsics
1181  *
1182  * All of the shared variable atomic memory operations read a value from
1183  * memory, compute a new value using one of the operations below, write the
1184  * new value to memory, and return the original value read.
1185  *
1186  * All operations take 2 sources except CompSwap that takes 3. These
1187  * sources represent:
1188  *
1189  * 0: The offset into the shared variable storage region that the atomic
1190  *    operation will operate on.
1191  * 1: The data parameter to the atomic function (i.e. the value to add
1192  *    in shared_atomic_add, etc).
1193  * 2: For CompSwap only: the second data parameter.
1194  */
1195 static struct ir3_instruction *
1196 emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1197 {
1198    struct ir3_block *b = ctx->block;
1199    struct ir3_instruction *atomic, *src0, *src1;
1200    type_t type = TYPE_U32;
1201 
1202    src0 = ir3_get_src(ctx, &intr->src[0])[0]; /* offset */
1203    src1 = ir3_get_src(ctx, &intr->src[1])[0]; /* value */
1204 
1205    switch (intr->intrinsic) {
1206    case nir_intrinsic_shared_atomic_add:
1207       atomic = ir3_ATOMIC_ADD(b, src0, 0, src1, 0);
1208       break;
1209    case nir_intrinsic_shared_atomic_imin:
1210       atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1211       type = TYPE_S32;
1212       break;
1213    case nir_intrinsic_shared_atomic_umin:
1214       atomic = ir3_ATOMIC_MIN(b, src0, 0, src1, 0);
1215       break;
1216    case nir_intrinsic_shared_atomic_imax:
1217       atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1218       type = TYPE_S32;
1219       break;
1220    case nir_intrinsic_shared_atomic_umax:
1221       atomic = ir3_ATOMIC_MAX(b, src0, 0, src1, 0);
1222       break;
1223    case nir_intrinsic_shared_atomic_and:
1224       atomic = ir3_ATOMIC_AND(b, src0, 0, src1, 0);
1225       break;
1226    case nir_intrinsic_shared_atomic_or:
1227       atomic = ir3_ATOMIC_OR(b, src0, 0, src1, 0);
1228       break;
1229    case nir_intrinsic_shared_atomic_xor:
1230       atomic = ir3_ATOMIC_XOR(b, src0, 0, src1, 0);
1231       break;
1232    case nir_intrinsic_shared_atomic_exchange:
1233       atomic = ir3_ATOMIC_XCHG(b, src0, 0, src1, 0);
1234       break;
1235    case nir_intrinsic_shared_atomic_comp_swap:
1236       /* for cmpxchg, src1 is [ui]vec2(data, compare): */
1237       src1 = ir3_collect(b, ir3_get_src(ctx, &intr->src[2])[0], src1);
1238       atomic = ir3_ATOMIC_CMPXCHG(b, src0, 0, src1, 0);
1239       break;
1240    default:
1241       unreachable("boo");
1242    }
1243 
1244    atomic->cat6.iim_val = 1;
1245    atomic->cat6.d = 1;
1246    atomic->cat6.type = type;
1247    atomic->barrier_class = IR3_BARRIER_SHARED_W;
1248    atomic->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1249 
1250    /* even if nothing consumes the result, we can't DCE the instruction: */
1251    array_insert(b, b->keeps, atomic);
1252 
1253    return atomic;
1254 }
1255 
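/* Split an stp/ldp offset into a signed 13-bit immediate base plus a register
 * (or immediate) offset.
 */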
1256 static void
1257 stp_ldp_offset(struct ir3_context *ctx, nir_src *src,
1258                struct ir3_instruction **offset, int32_t *base)
1259 {
1260    struct ir3_block *b = ctx->block;
1261 
1262    if (nir_src_is_const(*src)) {
1263       unsigned src_offset = nir_src_as_uint(*src);
1264       /* The base offset field is only 13 bits, and it's signed. Try to make the
1265        * offset constant whenever the original offsets are similar, to avoid
1266        * creating too many constants in the final shader.
1267        */
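      /* sign-extend the low 13 bits of the offset to form the base: */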
1268       *base = ((int32_t) src_offset << (32 - 13)) >> (32 - 13);
1269       uint32_t offset_val = src_offset - *base;
1270       *offset = create_immed(b, offset_val);
1271    } else {
1272       /* TODO: match on nir_iadd with a constant that fits */
1273       *base = 0;
1274       *offset = ir3_get_src(ctx, src)[0];
1275    }
1276 }
1277 
1278 /* src[] = { offset }. */
1279 static void
1280 emit_intrinsic_load_scratch(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1281                             struct ir3_instruction **dst)
1282 {
1283    struct ir3_block *b = ctx->block;
1284    struct ir3_instruction *ldp, *offset;
1285    int32_t base;
1286 
1287    stp_ldp_offset(ctx, &intr->src[0], &offset, &base);
1288 
1289    ldp = ir3_LDP(b, offset, 0, create_immed(b, base), 0,
1290                  create_immed(b, intr->num_components), 0);
1291 
1292    ldp->cat6.type = utype_dst(intr->dest);
1293    ldp->dsts[0]->wrmask = MASK(intr->num_components);
1294 
1295    ldp->barrier_class = IR3_BARRIER_PRIVATE_R;
1296    ldp->barrier_conflict = IR3_BARRIER_PRIVATE_W;
1297 
1298    ir3_split_dest(b, dst, ldp, 0, intr->num_components);
1299 }
1300 
1301 /* src[] = { value, offset }. const_index[] = { write_mask } */
1302 static void
1303 emit_intrinsic_store_scratch(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1304 {
1305    struct ir3_block *b = ctx->block;
1306    struct ir3_instruction *stp, *offset;
1307    struct ir3_instruction *const *value;
1308    unsigned wrmask, ncomp;
1309    int32_t base;
1310 
1311    value = ir3_get_src(ctx, &intr->src[0]);
1312 
1313    stp_ldp_offset(ctx, &intr->src[1], &offset, &base);
1314 
1315    wrmask = nir_intrinsic_write_mask(intr);
1316    ncomp = ffs(~wrmask) - 1;
1317 
1318    assert(wrmask == BITFIELD_MASK(intr->num_components));
1319 
1320    stp = ir3_STP(b, offset, 0, ir3_create_collect(b, value, ncomp), 0,
1321                  create_immed(b, ncomp), 0);
1322    stp->cat6.dst_offset = base;
1323    stp->cat6.type = utype_src(intr->src[0]);
1324    stp->barrier_class = IR3_BARRIER_PRIVATE_W;
1325    stp->barrier_conflict = IR3_BARRIER_PRIVATE_R | IR3_BARRIER_PRIVATE_W;
1326 
1327    array_insert(b, b->keeps, stp);
1328 }
1329 
1330 struct tex_src_info {
1331    /* For prefetch */
1332    unsigned tex_base, samp_base, tex_idx, samp_idx;
1333    /* For normal tex instructions */
1334    unsigned base, a1_val, flags;
1335    struct ir3_instruction *samp_tex;
1336 };
1337 
1338 /* TODO handle actual indirect/dynamic case.. which is going to be weird
1339  * to handle with the image_mapping table..
1340  */
1341 static struct tex_src_info
1342 get_image_ssbo_samp_tex_src(struct ir3_context *ctx, nir_src *src)
1343 {
1344    struct ir3_block *b = ctx->block;
1345    struct tex_src_info info = {0};
1346    nir_intrinsic_instr *bindless_tex = ir3_bindless_resource(*src);
1347 
1348    if (bindless_tex) {
1349       /* Bindless case */
1350       ctx->so->bindless_tex = true;
1351       info.flags |= IR3_INSTR_B;
1352 
1353       /* Gather information required to determine which encoding to
1354        * choose as well as for prefetch.
1355        */
1356       info.tex_base = nir_intrinsic_desc_set(bindless_tex);
1357       bool tex_const = nir_src_is_const(bindless_tex->src[0]);
1358       if (tex_const)
1359          info.tex_idx = nir_src_as_uint(bindless_tex->src[0]);
1360       info.samp_idx = 0;
1361 
1362       /* Choose encoding. */
1363       if (tex_const && info.tex_idx < 256) {
1364          if (info.tex_idx < 16) {
1365             /* Everything fits within the instruction */
1366             info.base = info.tex_base;
1367          } else {
1368             info.base = info.tex_base;
1369             info.a1_val = info.tex_idx << 3;
1370             info.flags |= IR3_INSTR_A1EN;
1371          }
1372          info.samp_tex = NULL;
1373       } else {
1374          info.flags |= IR3_INSTR_S2EN;
1375          info.base = info.tex_base;
1376 
1377          /* Note: the indirect source is now a vec2 instead of hvec2 */
1378          struct ir3_instruction *texture, *sampler;
1379 
1380          texture = ir3_get_src(ctx, src)[0];
1381          sampler = create_immed(b, 0);
1382          info.samp_tex = ir3_collect(b, texture, sampler);
1383       }
1384    } else {
1385       info.flags |= IR3_INSTR_S2EN;
1386       unsigned slot = nir_src_as_uint(*src);
1387       unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
1388       struct ir3_instruction *texture, *sampler;
1389 
1390       texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
1391       sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
1392 
1393       info.samp_tex = ir3_collect(b, sampler, texture);
1394    }
1395 
1396    return info;
1397 }
1398 
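/* Emit a cat5 sample instruction, handling the a1.x setup and base/samp/tex
 * fields needed for bindless descriptors that don't fit in the encoding.
 */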
1399 static struct ir3_instruction *
1400 emit_sam(struct ir3_context *ctx, opc_t opc, struct tex_src_info info,
1401          type_t type, unsigned wrmask, struct ir3_instruction *src0,
1402          struct ir3_instruction *src1)
1403 {
1404    struct ir3_instruction *sam, *addr;
1405    if (info.flags & IR3_INSTR_A1EN) {
1406       addr = ir3_get_addr1(ctx, info.a1_val);
1407    }
1408    sam = ir3_SAM(ctx->block, opc, type, wrmask, info.flags, info.samp_tex, src0,
1409                  src1);
1410    if (info.flags & IR3_INSTR_A1EN) {
1411       ir3_instr_set_address(sam, addr);
1412    }
1413    if (info.flags & IR3_INSTR_B) {
1414       sam->cat5.tex_base = info.base;
1415       sam->cat5.samp = info.samp_idx;
1416       sam->cat5.tex  = info.tex_idx;
1417    }
1418    return sam;
1419 }
1420 
1421 /* src[] = { deref, coord, sample_index }. const_index[] = {} */
1422 static void
1423 emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1424                           struct ir3_instruction **dst)
1425 {
1426    /* If the image can be written, must use LDIB to retrieve data, rather than
1427     * through ISAM (which uses the texture cache and won't get previous writes).
1428     */
1429    if (!(nir_intrinsic_access(intr) & ACCESS_CAN_REORDER)) {
1430       ctx->funcs->emit_intrinsic_load_image(ctx, intr, dst);
1431       return;
1432    }
1433 
1434    /* The sparse set of texture descriptors for non-coherent load_images
1435     * means we can't do indirection, so fall back to coherent load.
1436     */
1437    if (ctx->compiler->gen >= 5 &&
1438        !ir3_bindless_resource(intr->src[0]) &&
1439        !nir_src_is_const(intr->src[0])) {
1440       ctx->funcs->emit_intrinsic_load_image(ctx, intr, dst);
1441       return;
1442    }
1443 
1444    struct ir3_block *b = ctx->block;
1445    struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0]);
1446    struct ir3_instruction *sam;
1447    struct ir3_instruction *const *src0 = ir3_get_src(ctx, &intr->src[1]);
1448    struct ir3_instruction *coords[4];
1449    unsigned flags, ncoords = ir3_get_image_coords(intr, &flags);
1450    type_t type = ir3_get_type_for_image_intrinsic(intr);
1451 
1452    info.flags |= flags;
1453 
1454    /* hw doesn't do 1d, so we treat it as 2d with height of 1, and patch up the
1455     * y coord. Note that the array index must come after the fake y coord.
1456     */
1457    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);
1458    if (dim == GLSL_SAMPLER_DIM_1D || dim == GLSL_SAMPLER_DIM_BUF) {
1459       coords[0] = src0[0];
1460       coords[1] = create_immed(b, 0);
1461       for (unsigned i = 1; i < ncoords; i++)
1462          coords[i + 1] = src0[i];
1463       ncoords++;
1464    } else {
1465       for (unsigned i = 0; i < ncoords; i++)
1466          coords[i] = src0[i];
1467    }
1468 
1469    sam = emit_sam(ctx, OPC_ISAM, info, type, 0b1111,
1470                   ir3_create_collect(b, coords, ncoords), NULL);
1471 
1472    ir3_handle_nonuniform(sam, intr);
1473 
1474    sam->barrier_class = IR3_BARRIER_IMAGE_R;
1475    sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
1476 
1477    ir3_split_dest(b, dst, sam, 0, 4);
1478 }
1479 
1480 /* A4xx version of image_size, see ir3_a6xx.c for newer resinfo version. */
1481 void
1482 emit_intrinsic_image_size_tex(struct ir3_context *ctx,
1483                               nir_intrinsic_instr *intr,
1484                               struct ir3_instruction **dst)
1485 {
1486    struct ir3_block *b = ctx->block;
1487    struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0]);
1488    struct ir3_instruction *sam, *lod;
1489    unsigned flags, ncoords = ir3_get_image_coords(intr, &flags);
1490    type_t dst_type = nir_dest_bit_size(intr->dest) == 16 ? TYPE_U16 : TYPE_U32;
1491 
1492    info.flags |= flags;
1493    assert(nir_src_as_uint(intr->src[1]) == 0);
1494    lod = create_immed(b, 0);
1495    sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL);
1496 
1497    /* Array size actually ends up in .w rather than .z. This doesn't
1498     * matter for miplevel 0, but for higher mips the value in z is
1499     * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
1500     * returned, which means that we have to add 1 to it for arrays for
1501     * a3xx.
1502     *
1503     * Note use a temporary dst and then copy, since the size of the dst
1504     * array that is passed in is based on nir's understanding of the
1505     * result size, not the hardware's
1506     */
1507    struct ir3_instruction *tmp[4];
1508 
1509    ir3_split_dest(b, tmp, sam, 0, 4);
1510 
1511    for (unsigned i = 0; i < ncoords; i++)
1512       dst[i] = tmp[i];
1513 
1514    if (flags & IR3_INSTR_A) {
1515       if (ctx->compiler->levels_add_one) {
1516          dst[ncoords - 1] = ir3_ADD_U(b, tmp[3], 0, create_immed(b, 1), 0);
1517       } else {
1518          dst[ncoords - 1] = ir3_MOV(b, tmp[3], TYPE_U32);
1519       }
1520    }
1521 }
1522 
1523 /* src[] = { buffer_index, offset }. No const_index */
1524 static void
1525 emit_intrinsic_load_ssbo(struct ir3_context *ctx,
1526                          nir_intrinsic_instr *intr,
1527                          struct ir3_instruction **dst)
1528 {
1529    /* Note: isam currently can't handle vectorized loads/stores */
1530    if (!(nir_intrinsic_access(intr) & ACCESS_CAN_REORDER) ||
1531        !ir3_bindless_resource(intr->src[0]) ||
1532        intr->dest.ssa.num_components > 1) {
1533       ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
1534       return;
1535    }
1536 
1537    struct ir3_block *b = ctx->block;
1538    struct ir3_instruction *offset = ir3_get_src(ctx, &intr->src[2])[0];
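   /* As with buffer/1D images above, isam wants a 2D coordinate, so pad
    * the offset with a zero y component:
    */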
1539    struct ir3_instruction *coords = ir3_collect(b, offset, create_immed(b, 0));
1540    struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0]);
1541 
1542    unsigned num_components = intr->dest.ssa.num_components;
1543    struct ir3_instruction *sam =
1544       emit_sam(ctx, OPC_ISAM, info, utype_for_size(intr->dest.ssa.bit_size),
1545                MASK(num_components), coords, NULL);
1546 
1547    ir3_handle_nonuniform(sam, intr);
1548 
1549    sam->barrier_class = IR3_BARRIER_BUFFER_R;
1550    sam->barrier_conflict = IR3_BARRIER_BUFFER_W;
1551 
1552    ir3_split_dest(b, dst, sam, 0, num_components);
1553 }
1554 
1555 static void
1556 emit_control_barrier(struct ir3_context *ctx)
1557 {
1558    /* Hull shaders dispatch 32 wide so an entire patch will always
1559     * fit in a single warp and execute in lock-step. Consequently,
1560     * we don't need to do anything for TCS barriers. Emitting a
1561     * barrier instruction here would deadlock.
1562     */
1563    if (ctx->so->type == MESA_SHADER_TESS_CTRL)
1564       return;
1565 
1566    struct ir3_block *b = ctx->block;
1567    struct ir3_instruction *barrier = ir3_BAR(b);
1568    barrier->cat7.g = true;
1569    if (ctx->compiler->gen < 6)
1570       barrier->cat7.l = true;
1571    barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
1572    barrier->barrier_class = IR3_BARRIER_EVERYTHING;
1573    array_insert(b, b->keeps, barrier);
1574 
1575    ctx->so->has_barrier = true;
1576 }
1577 
1578 static void
1579 emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1580 {
1581    struct ir3_block *b = ctx->block;
1582    struct ir3_instruction *barrier;
1583 
1584    /* TODO: find out why there is a major difference in .l usage
1585     * between a5xx and a6xx.
1586     */
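   /* For the fences built below: .r/.w select whether reads and writes are
    * ordered, while .g/.l select how far out in the memory hierarchy the
    * fence applies (usage of .l differs between gens, per the TODO above).
    */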
1587 
1588    switch (intr->intrinsic) {
1589    case nir_intrinsic_control_barrier:
1590       emit_control_barrier(ctx);
1591       return;
1592    case nir_intrinsic_scoped_barrier: {
1593       nir_scope exec_scope = nir_intrinsic_execution_scope(intr);
1594       nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
1595       /* loads/stores are always cache-coherent so we can filter out
1596        * available/visible.
1597        */
1598       nir_memory_semantics semantics =
1599          nir_intrinsic_memory_semantics(intr) & (NIR_MEMORY_ACQUIRE |
1600                                                  NIR_MEMORY_RELEASE);
1601 
1602       if (ctx->so->type == MESA_SHADER_TESS_CTRL) {
1603          /* Remove mode corresponding to nir_intrinsic_memory_barrier_tcs_patch,
1604           * because hull shaders dispatch 32 wide so an entire patch will
1605           * always fit in a single warp and execute in lock-step.
1606           *
1607           * TODO: memory barrier also tells us not to reorder stores, this
1608           * information is lost here (backend doesn't reorder stores so we
1609           * are safe for now).
1610           */
1611          modes &= ~nir_var_shader_out;
1612       }
1613 
1614       assert(!(modes & nir_var_shader_out));
1615 
1616       if ((modes &
1617            (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_mem_global |
1618             nir_var_image)) && semantics) {
1619          barrier = ir3_FENCE(b);
1620          barrier->cat7.r = true;
1621          barrier->cat7.w = true;
1622 
1623          if (modes & (nir_var_mem_ssbo | nir_var_image | nir_var_mem_global)) {
1624             barrier->cat7.g = true;
1625          }
1626 
1627          if (ctx->compiler->gen >= 6) {
1628             if (modes & (nir_var_mem_ssbo | nir_var_image)) {
1629                barrier->cat7.l = true;
1630             }
1631          } else {
1632             if (modes & (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_image)) {
1633                barrier->cat7.l = true;
1634             }
1635          }
1636 
1637          barrier->barrier_class = 0;
1638          barrier->barrier_conflict = 0;
1639 
1640          if (modes & nir_var_mem_shared) {
1641             barrier->barrier_class |= IR3_BARRIER_SHARED_W;
1642             barrier->barrier_conflict |=
1643                IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1644          }
1645 
1646          if (modes & (nir_var_mem_ssbo | nir_var_mem_global)) {
1647             barrier->barrier_class |= IR3_BARRIER_BUFFER_W;
1648             barrier->barrier_conflict |=
1649                IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1650          }
1651 
1652          if (modes & nir_var_image) {
1653             barrier->barrier_class |= IR3_BARRIER_IMAGE_W;
1654             barrier->barrier_conflict |=
1655                IR3_BARRIER_IMAGE_W | IR3_BARRIER_IMAGE_R;
1656          }
1657          array_insert(b, b->keeps, barrier);
1658       }
1659 
1660       if (exec_scope >= NIR_SCOPE_WORKGROUP) {
1661          emit_control_barrier(ctx);
1662       }
1663 
1664       return;
1665    }
1666    case nir_intrinsic_memory_barrier_tcs_patch:
1667       /* Not applicable, see explanation for scoped_barrier + shader_out */
1668       return;
1669    case nir_intrinsic_memory_barrier_buffer:
1670       barrier = ir3_FENCE(b);
1671       barrier->cat7.g = true;
1672       if (ctx->compiler->gen >= 6)
1673          barrier->cat7.l = true;
1674       barrier->cat7.r = true;
1675       barrier->cat7.w = true;
1676       barrier->barrier_class = IR3_BARRIER_BUFFER_W;
1677       barrier->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1678       break;
1679    case nir_intrinsic_memory_barrier_image:
1680       barrier = ir3_FENCE(b);
1681       barrier->cat7.g = true;
1682       barrier->cat7.l = true;
1683       barrier->cat7.r = true;
1684       barrier->cat7.w = true;
1685       barrier->barrier_class = IR3_BARRIER_IMAGE_W;
1686       barrier->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
1687       break;
1688    case nir_intrinsic_memory_barrier_shared:
1689       barrier = ir3_FENCE(b);
1690       if (ctx->compiler->gen < 6)
1691          barrier->cat7.l = true;
1692       barrier->cat7.r = true;
1693       barrier->cat7.w = true;
1694       barrier->barrier_class = IR3_BARRIER_SHARED_W;
1695       barrier->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
1696       break;
1697    case nir_intrinsic_memory_barrier:
1698    case nir_intrinsic_group_memory_barrier:
1699       barrier = ir3_FENCE(b);
1700       barrier->cat7.g = true;
1701       barrier->cat7.l = true;
1702       barrier->cat7.r = true;
1703       barrier->cat7.w = true;
1704       barrier->barrier_class =
1705          IR3_BARRIER_SHARED_W | IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_W;
1706       barrier->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
1707                                   IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
1708                                   IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
1709       break;
1710    default:
1711       unreachable("boo");
1712    }
1713 
1714    /* make sure barrier doesn't get DCE'd */
1715    array_insert(b, b->keeps, barrier);
1716 }
1717 
1718 static void
1719 add_sysval_input_compmask(struct ir3_context *ctx, gl_system_value slot,
1720                           unsigned compmask, struct ir3_instruction *instr)
1721 {
1722    struct ir3_shader_variant *so = ctx->so;
1723    unsigned n = so->inputs_count++;
1724 
1725    assert(instr->opc == OPC_META_INPUT);
1726    instr->input.inidx = n;
1727    instr->input.sysval = slot;
1728 
1729    so->inputs[n].sysval = true;
1730    so->inputs[n].slot = slot;
1731    so->inputs[n].compmask = compmask;
1732    so->total_in++;
1733 
1734    so->sysval_in += util_last_bit(compmask);
1735 }
1736 
1737 static struct ir3_instruction *
1738 create_sysval_input(struct ir3_context *ctx, gl_system_value slot,
1739                     unsigned compmask)
1740 {
1741    assert(compmask);
1742    struct ir3_instruction *sysval = create_input(ctx, compmask);
1743    add_sysval_input_compmask(ctx, slot, compmask, sysval);
1744    return sysval;
1745 }
1746 
1747 static struct ir3_instruction *
1748 get_barycentric(struct ir3_context *ctx, enum ir3_bary bary)
1749 {
1750    static const gl_system_value sysval_base =
1751       SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
1752 
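   /* The asserts below just guarantee that ir3_bary stays in the same
    * order as the barycentric sysvals, so that sysval_base + bary picks
    * the right system value:
    */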
1753    STATIC_ASSERT(sysval_base + IJ_PERSP_PIXEL ==
1754                  SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
1755    STATIC_ASSERT(sysval_base + IJ_PERSP_SAMPLE ==
1756                  SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
1757    STATIC_ASSERT(sysval_base + IJ_PERSP_CENTROID ==
1758                  SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
1759    STATIC_ASSERT(sysval_base + IJ_PERSP_SIZE ==
1760                  SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);
1761    STATIC_ASSERT(sysval_base + IJ_LINEAR_PIXEL ==
1762                  SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL);
1763    STATIC_ASSERT(sysval_base + IJ_LINEAR_CENTROID ==
1764                  SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID);
1765    STATIC_ASSERT(sysval_base + IJ_LINEAR_SAMPLE ==
1766                  SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE);
1767 
1768    if (!ctx->ij[bary]) {
1769       struct ir3_instruction *xy[2];
1770       struct ir3_instruction *ij;
1771 
1772       ij = create_sysval_input(ctx, sysval_base + bary, 0x3);
1773       ir3_split_dest(ctx->in_block, xy, ij, 0, 2);
1774 
1775       ctx->ij[bary] = ir3_create_collect(ctx->in_block, xy, 2);
1776    }
1777 
1778    return ctx->ij[bary];
1779 }
1780 
1781 /* TODO: make this a common NIR helper?
1782  * there is a nir_system_value_from_intrinsic but it takes nir_intrinsic_op so
1783  * it can't be extended to work with this
1784  */
1785 static gl_system_value
1786 nir_intrinsic_barycentric_sysval(nir_intrinsic_instr *intr)
1787 {
1788    enum glsl_interp_mode interp_mode = nir_intrinsic_interp_mode(intr);
1789    gl_system_value sysval;
1790 
1791    switch (intr->intrinsic) {
1792    case nir_intrinsic_load_barycentric_pixel:
1793       if (interp_mode == INTERP_MODE_NOPERSPECTIVE)
1794          sysval = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
1795       else
1796          sysval = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
1797       break;
1798    case nir_intrinsic_load_barycentric_centroid:
1799       if (interp_mode == INTERP_MODE_NOPERSPECTIVE)
1800          sysval = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
1801       else
1802          sysval = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
1803       break;
1804    case nir_intrinsic_load_barycentric_sample:
1805       if (interp_mode == INTERP_MODE_NOPERSPECTIVE)
1806          sysval = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
1807       else
1808          sysval = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
1809       break;
1810    default:
1811       unreachable("invalid barycentric intrinsic");
1812    }
1813 
1814    return sysval;
1815 }
1816 
1817 static void
1818 emit_intrinsic_barycentric(struct ir3_context *ctx, nir_intrinsic_instr *intr,
1819                            struct ir3_instruction **dst)
1820 {
1821    gl_system_value sysval = nir_intrinsic_barycentric_sysval(intr);
1822 
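   /* Without MSAA, sample-rate (and, before a6xx, centroid) interpolation
    * degenerates to per-pixel interpolation, so fold those down to the
    * pixel variants:
    */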
1823    if (!ctx->so->key.msaa) {
1824       switch (sysval) {
1825       case SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE:
1826          sysval = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
1827          break;
1828       case SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID:
1829          if (ctx->compiler->gen < 6)
1830             sysval = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
1831          break;
1832       case SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE:
1833          sysval = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
1834          break;
1835       case SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID:
1836          if (ctx->compiler->gen < 6)
1837             sysval = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
1838          break;
1839       default:
1840          break;
1841       }
1842    }
1843 
1844    enum ir3_bary bary = sysval - SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
1845 
1846    struct ir3_instruction *ij = get_barycentric(ctx, bary);
1847    ir3_split_dest(ctx->block, dst, ij, 0, 2);
1848 }
1849 
1850 static struct ir3_instruction *
1851 get_frag_coord(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1852 {
1853    if (!ctx->frag_coord) {
1854       struct ir3_block *b = ir3_after_preamble(ctx->ir);
1855       struct ir3_instruction *xyzw[4];
1856       struct ir3_instruction *hw_frag_coord;
1857 
1858       hw_frag_coord = create_sysval_input(ctx, SYSTEM_VALUE_FRAG_COORD, 0xf);
1859       ir3_split_dest(b, xyzw, hw_frag_coord, 0, 4);
1860 
1861       /* for frag_coord.xy, we get unsigned values.. we need
1862        * to subtract (integer) 8 and divide by 16 (right-
1863        * shift by 4) then convert to float:
1864        *
1865        *    sub.s tmp, src, 8
1866        *    shr.b tmp, tmp, 4
1867        *    mov.u32f32 dst, tmp
1868        *
1869        */
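      /* (the loop below implements the scale as a u32->f32 conversion
       * followed by a multiply by 1.0/16.0)
       */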
1870       for (int i = 0; i < 2; i++) {
1871          xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);
1872          xyzw[i] =
1873             ir3_MUL_F(b, xyzw[i], 0, create_immed(b, fui(1.0 / 16.0)), 0);
1874       }
1875 
1876       ctx->frag_coord = ir3_create_collect(b, xyzw, 4);
1877    }
1878 
1879    ctx->so->fragcoord_compmask |= nir_ssa_def_components_read(&intr->dest.ssa);
1880 
1881    return ctx->frag_coord;
1882 }
1883 
1884 /* This is a bit of a hack until ir3_context is converted to store SSA values
1885  * as ir3_register's instead of ir3_instruction's. Pick out a given destination
1886  * of an instruction with multiple destinations using a mov that will get folded
1887  * away by ir3_cp.
1888  */
1889 static struct ir3_instruction *
1890 create_multidst_mov(struct ir3_block *block, struct ir3_register *dst)
1891 {
1892    struct ir3_instruction *mov = ir3_instr_create(block, OPC_MOV, 1, 1);
1893    unsigned dst_flags = dst->flags & IR3_REG_HALF;
1894    unsigned src_flags = dst->flags & (IR3_REG_HALF | IR3_REG_SHARED);
1895 
1896    __ssa_dst(mov)->flags |= dst_flags;
1897    struct ir3_register *src =
1898       ir3_src_create(mov, INVALID_REG, IR3_REG_SSA | src_flags);
1899    src->wrmask = dst->wrmask;
1900    src->def = dst;
1901    debug_assert(!(dst->flags & IR3_REG_RELATIV));
1902    mov->cat1.src_type = mov->cat1.dst_type =
1903       (dst->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
1904    return mov;
1905 }
1906 
1907 static reduce_op_t
1908 get_reduce_op(nir_op opc)
1909 {
1910    switch (opc) {
1911    case nir_op_iadd: return REDUCE_OP_ADD_U;
1912    case nir_op_fadd: return REDUCE_OP_ADD_F;
1913    case nir_op_imul: return REDUCE_OP_MUL_U;
1914    case nir_op_fmul: return REDUCE_OP_MUL_F;
1915    case nir_op_umin: return REDUCE_OP_MIN_U;
1916    case nir_op_imin: return REDUCE_OP_MIN_S;
1917    case nir_op_fmin: return REDUCE_OP_MIN_F;
1918    case nir_op_umax: return REDUCE_OP_MAX_U;
1919    case nir_op_imax: return REDUCE_OP_MAX_S;
1920    case nir_op_fmax: return REDUCE_OP_MAX_F;
1921    case nir_op_iand: return REDUCE_OP_AND_B;
1922    case nir_op_ior:  return REDUCE_OP_OR_B;
1923    case nir_op_ixor: return REDUCE_OP_XOR_B;
1924    default:
1925       unreachable("unknown NIR reduce op");
1926    }
1927 }
1928 
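/* Returns the identity element for the given reduction op, i.e. the value
 * x for which op(x, y) == y, used below to seed the shared-reg accumulator.
 */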
1929 static uint32_t
1930 get_reduce_identity(nir_op opc, unsigned size)
1931 {
1932    switch (opc) {
1933    case nir_op_iadd:
1934       return 0;
1935    case nir_op_fadd:
1936       return size == 32 ? fui(0.0f) : _mesa_float_to_half(0.0f);
1937    case nir_op_imul:
1938       return 1;
1939    case nir_op_fmul:
1940       return size == 32 ? fui(1.0f) : _mesa_float_to_half(1.0f);
1941    case nir_op_umax:
1942       return 0;
1943    case nir_op_imax:
1944       return size == 32 ? INT32_MIN : (uint32_t)INT16_MIN;
1945    case nir_op_fmax:
1946       return size == 32 ? fui(-INFINITY) : _mesa_float_to_half(-INFINITY);
1947    case nir_op_umin:
1948       return size == 32 ? UINT32_MAX : UINT16_MAX;
1949    case nir_op_imin:
1950       return size == 32 ? INT32_MAX : (uint32_t)INT16_MAX;
1951    case nir_op_fmin:
1952       return size == 32 ? fui(INFINITY) : _mesa_float_to_half(INFINITY);
1953    case nir_op_iand:
1954       return size == 32 ? ~0 : (size == 16 ? (uint32_t)(uint16_t)~0 : 1);
1955    case nir_op_ior:
1956       return 0;
1957    case nir_op_ixor:
1958       return 0;
1959    default:
1960       unreachable("unknown NIR reduce op");
1961    }
1962 }
1963 
1964 static struct ir3_instruction *
1965 emit_intrinsic_reduce(struct ir3_context *ctx, nir_intrinsic_instr *intr)
1966 {
1967    struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
1968    nir_op nir_reduce_op = (nir_op) nir_intrinsic_reduction_op(intr);
1969    reduce_op_t reduce_op = get_reduce_op(nir_reduce_op);
1970    unsigned dst_size = nir_dest_bit_size(intr->dest);
1971    unsigned flags = (ir3_bitsize(ctx, dst_size) == 16) ? IR3_REG_HALF : 0;
1972 
1973    /* Note: the shared reg is initialized to the identity, so we need it to
1974     * always be 32-bit even when the source isn't, because half shared
1975     * regs are not supported.
1976     */
1977    struct ir3_instruction *identity =
1978       create_immed(ctx->block, get_reduce_identity(nir_reduce_op, dst_size));
1979    identity = ir3_READ_FIRST_MACRO(ctx->block, identity, 0);
1980    identity->dsts[0]->flags |= IR3_REG_SHARED;
1981 
1982    /* OPC_SCAN_MACRO has the following destinations:
1983     * - Exclusive scan result (interferes with source)
1984     * - Inclusive scan result
1985     * - Shared reg reduction result, must be initialized to the identity
1986     *
1987     * The loop computes all three results at the same time, we just have to
1988     * choose which destination to return.
1989     */
1990    struct ir3_instruction *scan =
1991       ir3_instr_create(ctx->block, OPC_SCAN_MACRO, 3, 2);
1992    scan->cat1.reduce_op = reduce_op;
1993 
1994    struct ir3_register *exclusive = __ssa_dst(scan);
1995    exclusive->flags |= flags | IR3_REG_EARLY_CLOBBER;
1996    struct ir3_register *inclusive = __ssa_dst(scan);
1997    inclusive->flags |= flags;
1998    struct ir3_register *reduce = __ssa_dst(scan);
1999    reduce->flags |= IR3_REG_SHARED;
2000 
2001    /* The 32-bit multiply macro reads its sources after writing a partial result
2002     * to the destination, therefore inclusive also interferes with the source.
2003     */
2004    if (reduce_op == REDUCE_OP_MUL_U && dst_size == 32)
2005       inclusive->flags |= IR3_REG_EARLY_CLOBBER;
2006 
2007    /* Normal source */
2008    __ssa_src(scan, src, 0);
2009 
2010    /* shared reg tied source */
2011    struct ir3_register *reduce_init = __ssa_src(scan, identity, IR3_REG_SHARED);
2012    ir3_reg_tie(reduce, reduce_init);
2013 
2014    struct ir3_register *dst;
2015    switch (intr->intrinsic) {
2016    case nir_intrinsic_reduce: dst = reduce; break;
2017    case nir_intrinsic_inclusive_scan: dst = inclusive; break;
2018    case nir_intrinsic_exclusive_scan: dst = exclusive; break;
2019    default:
2020       unreachable("unknown reduce intrinsic");
2021    }
2022 
2023    return create_multidst_mov(ctx->block, dst);
2024 }
2025 
2026 static void setup_input(struct ir3_context *ctx, nir_intrinsic_instr *intr);
2027 static void setup_output(struct ir3_context *ctx, nir_intrinsic_instr *intr);
2028 
2029 static void
2030 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
2031 {
2032    const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
2033    struct ir3_instruction **dst;
2034    struct ir3_instruction *const *src;
2035    struct ir3_block *b = ctx->block;
2036    unsigned dest_components = nir_intrinsic_dest_components(intr);
2037    int idx;
2038 
2039    if (info->has_dest) {
2040       dst = ir3_get_dst(ctx, &intr->dest, dest_components);
2041    } else {
2042       dst = NULL;
2043    }
2044 
2045    const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
2046    const unsigned primitive_param = const_state->offsets.primitive_param * 4;
2047    const unsigned primitive_map = const_state->offsets.primitive_map * 4;
2048 
2049    switch (intr->intrinsic) {
2050    case nir_intrinsic_load_uniform:
2051       idx = nir_intrinsic_base(intr);
2052       if (nir_src_is_const(intr->src[0])) {
2053          idx += nir_src_as_uint(intr->src[0]);
2054          for (int i = 0; i < dest_components; i++) {
2055             dst[i] = create_uniform_typed(
2056                b, idx + i,
2057                nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32);
2058          }
2059       } else {
2060          src = ir3_get_src(ctx, &intr->src[0]);
2061          for (int i = 0; i < dest_components; i++) {
2062             dst[i] = create_uniform_indirect(
2063                b, idx + i,
2064                nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32,
2065                ir3_get_addr0(ctx, src[0], 1));
2066          }
2067          /* NOTE: if relative addressing is used, we set
2068           * constlen in the compiler (to worst-case value)
2069           * since we don't know in the assembler what the max
2070           * addr reg value can be:
2071           */
2072          ctx->so->constlen =
2073             MAX2(ctx->so->constlen,
2074                  ctx->so->shader->num_reserved_user_consts +
2075                  const_state->ubo_state.size / 16);
2076       }
2077       break;
2078 
2079    case nir_intrinsic_load_vs_primitive_stride_ir3:
2080       dst[0] = create_uniform(b, primitive_param + 0);
2081       break;
2082    case nir_intrinsic_load_vs_vertex_stride_ir3:
2083       dst[0] = create_uniform(b, primitive_param + 1);
2084       break;
2085    case nir_intrinsic_load_hs_patch_stride_ir3:
2086       dst[0] = create_uniform(b, primitive_param + 2);
2087       break;
2088    case nir_intrinsic_load_patch_vertices_in:
2089       dst[0] = create_uniform(b, primitive_param + 3);
2090       break;
2091    case nir_intrinsic_load_tess_param_base_ir3:
2092       dst[0] = create_uniform(b, primitive_param + 4);
2093       dst[1] = create_uniform(b, primitive_param + 5);
2094       break;
2095    case nir_intrinsic_load_tess_factor_base_ir3:
2096       dst[0] = create_uniform(b, primitive_param + 6);
2097       dst[1] = create_uniform(b, primitive_param + 7);
2098       break;
2099 
2100    case nir_intrinsic_load_primitive_location_ir3:
2101       idx = nir_intrinsic_driver_location(intr);
2102       dst[0] = create_uniform(b, primitive_map + idx);
2103       break;
2104 
2105    case nir_intrinsic_load_gs_header_ir3:
2106       dst[0] = ctx->gs_header;
2107       break;
2108    case nir_intrinsic_load_tcs_header_ir3:
2109       dst[0] = ctx->tcs_header;
2110       break;
2111 
2112    case nir_intrinsic_load_rel_patch_id_ir3:
2113       dst[0] = ctx->rel_patch_id;
2114       break;
2115 
2116    case nir_intrinsic_load_primitive_id:
2117       if (!ctx->primitive_id) {
2118          ctx->primitive_id =
2119             create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
2120       }
2121       dst[0] = ctx->primitive_id;
2122       break;
2123 
2124    case nir_intrinsic_load_tess_coord:
2125       if (!ctx->tess_coord) {
2126          ctx->tess_coord =
2127             create_sysval_input(ctx, SYSTEM_VALUE_TESS_COORD, 0x3);
2128       }
2129       ir3_split_dest(b, dst, ctx->tess_coord, 0, 2);
2130 
2131       /* Unused, but ir3_put_dst() below wants to free something */
2132       dst[2] = create_immed(b, 0);
2133       break;
2134 
2135    case nir_intrinsic_end_patch_ir3:
2136       assert(ctx->so->type == MESA_SHADER_TESS_CTRL);
2137       struct ir3_instruction *end = ir3_PREDE(b);
2138       array_insert(b, b->keeps, end);
2139 
2140       end->barrier_class = IR3_BARRIER_EVERYTHING;
2141       end->barrier_conflict = IR3_BARRIER_EVERYTHING;
2142       break;
2143 
2144    case nir_intrinsic_store_global_ir3:
2145       ctx->funcs->emit_intrinsic_store_global_ir3(ctx, intr);
2146       break;
2147    case nir_intrinsic_load_global_ir3:
2148       ctx->funcs->emit_intrinsic_load_global_ir3(ctx, intr, dst);
2149       break;
2150 
2151    case nir_intrinsic_load_ubo:
2152       emit_intrinsic_load_ubo(ctx, intr, dst);
2153       break;
2154    case nir_intrinsic_load_ubo_vec4:
2155       emit_intrinsic_load_ubo_ldc(ctx, intr, dst);
2156       break;
2157    case nir_intrinsic_copy_ubo_to_uniform_ir3:
2158       emit_intrinsic_copy_ubo_to_uniform(ctx, intr);
2159       break;
2160    case nir_intrinsic_load_frag_coord:
2161       ir3_split_dest(b, dst, get_frag_coord(ctx, intr), 0, 4);
2162       break;
2163    case nir_intrinsic_load_sample_pos_from_id: {
2164       /* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32,
2165        * but that doesn't seem necessary.
2166        */
2167       struct ir3_instruction *offset =
2168          ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0);
2169       offset->dsts[0]->wrmask = 0x3;
2170       offset->cat5.type = TYPE_F32;
2171 
2172       ir3_split_dest(b, dst, offset, 0, 2);
2173 
2174       break;
2175    }
2176    case nir_intrinsic_load_size_ir3:
2177       if (!ctx->ij[IJ_PERSP_SIZE]) {
2178          ctx->ij[IJ_PERSP_SIZE] =
2179             create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE, 0x1);
2180       }
2181       dst[0] = ctx->ij[IJ_PERSP_SIZE];
2182       break;
2183    case nir_intrinsic_load_barycentric_centroid:
2184    case nir_intrinsic_load_barycentric_sample:
2185    case nir_intrinsic_load_barycentric_pixel:
2186       emit_intrinsic_barycentric(ctx, intr, dst);
2187       break;
2188    case nir_intrinsic_load_interpolated_input:
2189    case nir_intrinsic_load_input:
2190       setup_input(ctx, intr);
2191       break;
2192    case nir_intrinsic_load_kernel_input:
2193       emit_intrinsic_load_kernel_input(ctx, intr, dst);
2194       break;
2195    /* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
2196     * pass and replaced by an ir3-specific version that adds the
2197     * dword-offset in the last source.
2198     */
2199    case nir_intrinsic_load_ssbo_ir3:
2200       emit_intrinsic_load_ssbo(ctx, intr, dst);
2201       break;
2202    case nir_intrinsic_store_ssbo_ir3:
2203       if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
2204           !ctx->s->info.fs.early_fragment_tests)
2205          ctx->so->no_earlyz = true;
2206       ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
2207       break;
2208    case nir_intrinsic_get_ssbo_size:
2209       emit_intrinsic_ssbo_size(ctx, intr, dst);
2210       break;
2211    case nir_intrinsic_ssbo_atomic_add_ir3:
2212    case nir_intrinsic_ssbo_atomic_imin_ir3:
2213    case nir_intrinsic_ssbo_atomic_umin_ir3:
2214    case nir_intrinsic_ssbo_atomic_imax_ir3:
2215    case nir_intrinsic_ssbo_atomic_umax_ir3:
2216    case nir_intrinsic_ssbo_atomic_and_ir3:
2217    case nir_intrinsic_ssbo_atomic_or_ir3:
2218    case nir_intrinsic_ssbo_atomic_xor_ir3:
2219    case nir_intrinsic_ssbo_atomic_exchange_ir3:
2220    case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
2221       if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
2222           !ctx->s->info.fs.early_fragment_tests)
2223          ctx->so->no_earlyz = true;
2224       dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
2225       break;
2226    case nir_intrinsic_load_shared:
2227       emit_intrinsic_load_shared(ctx, intr, dst);
2228       break;
2229    case nir_intrinsic_store_shared:
2230       emit_intrinsic_store_shared(ctx, intr);
2231       break;
2232    case nir_intrinsic_shared_atomic_add:
2233    case nir_intrinsic_shared_atomic_imin:
2234    case nir_intrinsic_shared_atomic_umin:
2235    case nir_intrinsic_shared_atomic_imax:
2236    case nir_intrinsic_shared_atomic_umax:
2237    case nir_intrinsic_shared_atomic_and:
2238    case nir_intrinsic_shared_atomic_or:
2239    case nir_intrinsic_shared_atomic_xor:
2240    case nir_intrinsic_shared_atomic_exchange:
2241    case nir_intrinsic_shared_atomic_comp_swap:
2242       dst[0] = emit_intrinsic_atomic_shared(ctx, intr);
2243       break;
2244    case nir_intrinsic_load_scratch:
2245       emit_intrinsic_load_scratch(ctx, intr, dst);
2246       break;
2247    case nir_intrinsic_store_scratch:
2248       emit_intrinsic_store_scratch(ctx, intr);
2249       break;
2250    case nir_intrinsic_image_load:
2251    case nir_intrinsic_bindless_image_load:
2252       emit_intrinsic_load_image(ctx, intr, dst);
2253       break;
2254    case nir_intrinsic_image_store:
2255    case nir_intrinsic_bindless_image_store:
2256       if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
2257           !ctx->s->info.fs.early_fragment_tests)
2258          ctx->so->no_earlyz = true;
2259       ctx->funcs->emit_intrinsic_store_image(ctx, intr);
2260       break;
2261    case nir_intrinsic_image_size:
2262    case nir_intrinsic_bindless_image_size:
2263       ctx->funcs->emit_intrinsic_image_size(ctx, intr, dst);
2264       break;
2265    case nir_intrinsic_image_atomic_add:
2266    case nir_intrinsic_bindless_image_atomic_add:
2267    case nir_intrinsic_image_atomic_imin:
2268    case nir_intrinsic_bindless_image_atomic_imin:
2269    case nir_intrinsic_image_atomic_umin:
2270    case nir_intrinsic_bindless_image_atomic_umin:
2271    case nir_intrinsic_image_atomic_imax:
2272    case nir_intrinsic_bindless_image_atomic_imax:
2273    case nir_intrinsic_image_atomic_umax:
2274    case nir_intrinsic_bindless_image_atomic_umax:
2275    case nir_intrinsic_image_atomic_and:
2276    case nir_intrinsic_bindless_image_atomic_and:
2277    case nir_intrinsic_image_atomic_or:
2278    case nir_intrinsic_bindless_image_atomic_or:
2279    case nir_intrinsic_image_atomic_xor:
2280    case nir_intrinsic_bindless_image_atomic_xor:
2281    case nir_intrinsic_image_atomic_exchange:
2282    case nir_intrinsic_bindless_image_atomic_exchange:
2283    case nir_intrinsic_image_atomic_comp_swap:
2284    case nir_intrinsic_bindless_image_atomic_comp_swap:
2285       if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
2286           !ctx->s->info.fs.early_fragment_tests)
2287          ctx->so->no_earlyz = true;
2288       dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
2289       break;
2290    case nir_intrinsic_scoped_barrier:
2291    case nir_intrinsic_control_barrier:
2292    case nir_intrinsic_memory_barrier:
2293    case nir_intrinsic_group_memory_barrier:
2294    case nir_intrinsic_memory_barrier_buffer:
2295    case nir_intrinsic_memory_barrier_image:
2296    case nir_intrinsic_memory_barrier_shared:
2297    case nir_intrinsic_memory_barrier_tcs_patch:
2298       emit_intrinsic_barrier(ctx, intr);
2299       /* note that the blk ptr is no longer valid, make that obvious: */
2300       b = NULL;
2301       break;
2302    case nir_intrinsic_store_output:
2303       setup_output(ctx, intr);
2304       break;
2305    case nir_intrinsic_load_base_vertex:
2306    case nir_intrinsic_load_first_vertex:
2307       if (!ctx->basevertex) {
2308          ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
2309       }
2310       dst[0] = ctx->basevertex;
2311       break;
2312    case nir_intrinsic_load_draw_id:
2313       if (!ctx->draw_id) {
2314          ctx->draw_id = create_driver_param(ctx, IR3_DP_DRAWID);
2315       }
2316       dst[0] = ctx->draw_id;
2317       break;
2318    case nir_intrinsic_load_base_instance:
2319       if (!ctx->base_instance) {
2320          ctx->base_instance = create_driver_param(ctx, IR3_DP_INSTID_BASE);
2321       }
2322       dst[0] = ctx->base_instance;
2323       break;
2324    case nir_intrinsic_load_view_index:
2325       if (!ctx->view_index) {
2326          ctx->view_index =
2327             create_sysval_input(ctx, SYSTEM_VALUE_VIEW_INDEX, 0x1);
2328       }
2329       dst[0] = ctx->view_index;
2330       break;
2331    case nir_intrinsic_load_vertex_id_zero_base:
2332    case nir_intrinsic_load_vertex_id:
2333       if (!ctx->vertex_id) {
2334          gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id)
2335                                  ? SYSTEM_VALUE_VERTEX_ID
2336                                  : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
2337          ctx->vertex_id = create_sysval_input(ctx, sv, 0x1);
2338       }
2339       dst[0] = ctx->vertex_id;
2340       break;
2341    case nir_intrinsic_load_instance_id:
2342       if (!ctx->instance_id) {
2343          ctx->instance_id =
2344             create_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, 0x1);
2345       }
2346       dst[0] = ctx->instance_id;
2347       break;
2348    case nir_intrinsic_load_sample_id:
2349       ctx->so->per_samp = true;
2350       FALLTHROUGH;
2351    case nir_intrinsic_load_sample_id_no_per_sample:
2352       if (!ctx->samp_id) {
2353          ctx->samp_id = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, 0x1);
2354          ctx->samp_id->dsts[0]->flags |= IR3_REG_HALF;
2355       }
2356       dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
2357       break;
2358    case nir_intrinsic_load_sample_mask_in:
2359       if (!ctx->samp_mask_in) {
2360          ctx->samp_mask_in =
2361             create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, 0x1);
2362       }
2363       dst[0] = ctx->samp_mask_in;
2364       break;
2365    case nir_intrinsic_load_user_clip_plane:
2366       idx = nir_intrinsic_ucp_id(intr);
2367       for (int i = 0; i < dest_components; i++) {
2368          unsigned n = idx * 4 + i;
2369          dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
2370       }
2371       break;
2372    case nir_intrinsic_load_front_face:
2373       if (!ctx->frag_face) {
2374          ctx->so->frag_face = true;
2375          ctx->frag_face =
2376             create_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, 0x1);
2377          ctx->frag_face->dsts[0]->flags |= IR3_REG_HALF;
2378       }
2379       /* for fragface, we get -1 for back and 0 for front. However this is
2380        * the inverse of what nir expects (where ~0 is true).
2381        */
2382       dst[0] = ir3_CMPS_S(b, ctx->frag_face, 0,
2383                           create_immed_typed(b, 0, TYPE_U16), 0);
2384       dst[0]->cat2.condition = IR3_COND_EQ;
2385       break;
2386    case nir_intrinsic_load_local_invocation_id:
2387       if (!ctx->local_invocation_id) {
2388          ctx->local_invocation_id =
2389             create_sysval_input(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, 0x7);
2390       }
2391       ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
2392       break;
2393    case nir_intrinsic_load_workgroup_id:
2394    case nir_intrinsic_load_workgroup_id_zero_base:
2395       if (ctx->compiler->has_shared_regfile) {
2396          if (!ctx->work_group_id) {
2397             ctx->work_group_id =
2398                create_sysval_input(ctx, SYSTEM_VALUE_WORKGROUP_ID, 0x7);
2399             ctx->work_group_id->dsts[0]->flags |= IR3_REG_SHARED;
2400          }
2401          ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
2402       } else {
2403          /* For a3xx/a4xx, this comes in via const injection by the hw */
2404          for (int i = 0; i < dest_components; i++) {
2405             dst[i] = create_driver_param(ctx, IR3_DP_WORKGROUP_ID_X + i);
2406          }
2407       }
2408       break;
2409    case nir_intrinsic_load_base_workgroup_id:
2410       for (int i = 0; i < dest_components; i++) {
2411          dst[i] = create_driver_param(ctx, IR3_DP_BASE_GROUP_X + i);
2412       }
2413       break;
2414    case nir_intrinsic_load_num_workgroups:
2415       for (int i = 0; i < dest_components; i++) {
2416          dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
2417       }
2418       break;
2419    case nir_intrinsic_load_workgroup_size:
2420       for (int i = 0; i < dest_components; i++) {
2421          dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
2422       }
2423       break;
2424    case nir_intrinsic_load_subgroup_size: {
2425       assert(ctx->so->type == MESA_SHADER_COMPUTE ||
2426              ctx->so->type == MESA_SHADER_FRAGMENT);
2427       enum ir3_driver_param size = ctx->so->type == MESA_SHADER_COMPUTE ?
2428          IR3_DP_CS_SUBGROUP_SIZE : IR3_DP_FS_SUBGROUP_SIZE;
2429       dst[0] = create_driver_param(ctx, size);
2430       break;
2431    }
2432    case nir_intrinsic_load_subgroup_id_shift_ir3:
2433       dst[0] = create_driver_param(ctx, IR3_DP_SUBGROUP_ID_SHIFT);
2434       break;
2435    case nir_intrinsic_load_work_dim:
2436       dst[0] = create_driver_param(ctx, IR3_DP_WORK_DIM);
2437       break;
2438    case nir_intrinsic_load_subgroup_invocation:
2439       assert(ctx->compiler->has_getfiberid);
2440       dst[0] = ir3_GETFIBERID(b);
2441       dst[0]->cat6.type = TYPE_U32;
2442       __ssa_dst(dst[0]);
2443       break;
2444    case nir_intrinsic_discard_if:
2445    case nir_intrinsic_discard:
2446    case nir_intrinsic_demote:
2447    case nir_intrinsic_demote_if:
2448    case nir_intrinsic_terminate:
2449    case nir_intrinsic_terminate_if: {
2450       struct ir3_instruction *cond, *kill;
2451 
2452       if (intr->intrinsic == nir_intrinsic_discard_if ||
2453           intr->intrinsic == nir_intrinsic_demote_if ||
2454           intr->intrinsic == nir_intrinsic_terminate_if) {
2455          /* conditional discard: */
2456          src = ir3_get_src(ctx, &intr->src[0]);
2457          cond = src[0];
2458       } else {
2459          /* unconditional discard: */
2460          cond = create_immed_typed(b, 1, ctx->compiler->bool_type);
2461       }
2462 
2463       /* NOTE: only cmps.*.* can write p0.x: */
2464       struct ir3_instruction *zero =
2465             create_immed_typed(b, 0, is_half(cond) ? TYPE_U16 : TYPE_U32);
2466       cond = ir3_CMPS_S(b, cond, 0, zero, 0);
2467       cond->cat2.condition = IR3_COND_NE;
2468 
2469       /* condition always goes in predicate register: */
2470       cond->dsts[0]->num = regid(REG_P0, 0);
2471       cond->dsts[0]->flags &= ~IR3_REG_SSA;
2472 
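      /* demote only turns the invocation into a helper (it keeps running
       * for derivative purposes), whereas kill terminates it outright:
       */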
2473       if (intr->intrinsic == nir_intrinsic_demote ||
2474           intr->intrinsic == nir_intrinsic_demote_if) {
2475          kill = ir3_DEMOTE(b, cond, 0);
2476       } else {
2477          kill = ir3_KILL(b, cond, 0);
2478       }
2479 
2480       /* Side-effects should not be moved on a different side of the kill */
2481       kill->barrier_class = IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_W;
2482       kill->barrier_conflict = IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_W;
2483       kill->srcs[0]->num = regid(REG_P0, 0);
2484       array_insert(ctx->ir, ctx->ir->predicates, kill);
2485 
2486       array_insert(b, b->keeps, kill);
2487       ctx->so->has_kill = true;
2488 
2489       break;
2490    }
2491 
2492    case nir_intrinsic_cond_end_ir3: {
2493       struct ir3_instruction *cond, *kill;
2494 
2495       src = ir3_get_src(ctx, &intr->src[0]);
2496       cond = src[0];
2497 
2498       /* NOTE: only cmps.*.* can write p0.x: */
2499       struct ir3_instruction *zero =
2500             create_immed_typed(b, 0, is_half(cond) ? TYPE_U16 : TYPE_U32);
2501       cond = ir3_CMPS_S(b, cond, 0, zero, 0);
2502       cond->cat2.condition = IR3_COND_NE;
2503 
2504       /* condition always goes in predicate register: */
2505       cond->dsts[0]->num = regid(REG_P0, 0);
2506 
2507       kill = ir3_PREDT(b, cond, 0);
2508 
2509       kill->barrier_class = IR3_BARRIER_EVERYTHING;
2510       kill->barrier_conflict = IR3_BARRIER_EVERYTHING;
2511 
2512       array_insert(ctx->ir, ctx->ir->predicates, kill);
2513       array_insert(b, b->keeps, kill);
2514       break;
2515    }
2516 
2517    case nir_intrinsic_vote_any:
2518    case nir_intrinsic_vote_all: {
2519       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2520       struct ir3_instruction *pred = ir3_get_predicate(ctx, src);
2521       if (intr->intrinsic == nir_intrinsic_vote_any)
2522          dst[0] = ir3_ANY_MACRO(ctx->block, pred, 0);
2523       else
2524          dst[0] = ir3_ALL_MACRO(ctx->block, pred, 0);
2525       dst[0]->srcs[0]->num = regid(REG_P0, 0);
2526       array_insert(ctx->ir, ctx->ir->predicates, dst[0]);
2527       break;
2528    }
2529    case nir_intrinsic_elect:
2530       dst[0] = ir3_ELECT_MACRO(ctx->block);
2531       /* This may expand to a divergent if/then, so allocate stack space for
2532        * it.
2533        */
2534       ctx->max_stack = MAX2(ctx->max_stack, ctx->stack + 1);
2535       break;
2536    case nir_intrinsic_preamble_start_ir3:
2537       dst[0] = ir3_SHPS_MACRO(ctx->block);
2538       ctx->max_stack = MAX2(ctx->max_stack, ctx->stack + 1);
2539       break;
2540 
2541    case nir_intrinsic_read_invocation_cond_ir3: {
2542       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2543       struct ir3_instruction *cond = ir3_get_src(ctx, &intr->src[1])[0];
2544       dst[0] = ir3_READ_COND_MACRO(ctx->block, ir3_get_predicate(ctx, cond), 0,
2545                                    src, 0);
2546       dst[0]->dsts[0]->flags |= IR3_REG_SHARED;
2547       dst[0]->srcs[0]->num = regid(REG_P0, 0);
2548       array_insert(ctx->ir, ctx->ir->predicates, dst[0]);
2549       ctx->max_stack = MAX2(ctx->max_stack, ctx->stack + 1);
2550       break;
2551    }
2552 
2553    case nir_intrinsic_read_first_invocation: {
2554       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2555       dst[0] = ir3_READ_FIRST_MACRO(ctx->block, src, 0);
2556       dst[0]->dsts[0]->flags |= IR3_REG_SHARED;
2557       ctx->max_stack = MAX2(ctx->max_stack, ctx->stack + 1);
2558       break;
2559    }
2560 
2561    case nir_intrinsic_ballot: {
2562       struct ir3_instruction *ballot;
2563       unsigned components = intr->dest.ssa.num_components;
2564       if (nir_src_is_const(intr->src[0]) && nir_src_as_bool(intr->src[0])) {
2565          /* ballot(true) is just MOVMSK */
2566          ballot = ir3_MOVMSK(ctx->block, components);
2567       } else {
2568          struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2569          struct ir3_instruction *pred = ir3_get_predicate(ctx, src);
2570          ballot = ir3_BALLOT_MACRO(ctx->block, pred, components);
2571          ballot->srcs[0]->num = regid(REG_P0, 0);
2572          array_insert(ctx->ir, ctx->ir->predicates, ballot);
2573          ctx->max_stack = MAX2(ctx->max_stack, ctx->stack + 1);
2574       }
2575       ir3_split_dest(ctx->block, dst, ballot, 0, components);
2576       break;
2577    }
2578 
2579    case nir_intrinsic_quad_broadcast: {
2580       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2581       struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[1])[0];
2582 
2583       type_t dst_type = type_uint_size(nir_dest_bit_size(intr->dest));
2584 
2585       if (dst_type != TYPE_U32)
2586          idx = ir3_COV(ctx->block, idx, TYPE_U32, dst_type);
2587 
2588       dst[0] = ir3_QUAD_SHUFFLE_BRCST(ctx->block, src, 0, idx, 0);
2589       dst[0]->cat5.type = dst_type;
2590       break;
2591    }
2592 
2593    case nir_intrinsic_quad_swap_horizontal: {
2594       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2595       dst[0] = ir3_QUAD_SHUFFLE_HORIZ(ctx->block, src, 0);
2596       dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
2597       break;
2598    }
2599 
2600    case nir_intrinsic_quad_swap_vertical: {
2601       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2602       dst[0] = ir3_QUAD_SHUFFLE_VERT(ctx->block, src, 0);
2603       dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
2604       break;
2605    }
2606 
2607    case nir_intrinsic_quad_swap_diagonal: {
2608       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
2609       dst[0] = ir3_QUAD_SHUFFLE_DIAG(ctx->block, src, 0);
2610       dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
2611       break;
2612    }
2613 
2614    case nir_intrinsic_load_shared_ir3:
2615       emit_intrinsic_load_shared_ir3(ctx, intr, dst);
2616       break;
2617    case nir_intrinsic_store_shared_ir3:
2618       emit_intrinsic_store_shared_ir3(ctx, intr);
2619       break;
2620    case nir_intrinsic_bindless_resource_ir3:
2621       dst[0] = ir3_get_src(ctx, &intr->src[0])[0];
2622       break;
2623    case nir_intrinsic_global_atomic_add_ir3:
2624    case nir_intrinsic_global_atomic_imin_ir3:
2625    case nir_intrinsic_global_atomic_umin_ir3:
2626    case nir_intrinsic_global_atomic_imax_ir3:
2627    case nir_intrinsic_global_atomic_umax_ir3:
2628    case nir_intrinsic_global_atomic_and_ir3:
2629    case nir_intrinsic_global_atomic_or_ir3:
2630    case nir_intrinsic_global_atomic_xor_ir3:
2631    case nir_intrinsic_global_atomic_exchange_ir3:
2632    case nir_intrinsic_global_atomic_comp_swap_ir3: {
2633       dst[0] = ctx->funcs->emit_intrinsic_atomic_global(ctx, intr);
2634       break;
2635    }
2636 
2637    case nir_intrinsic_reduce:
2638    case nir_intrinsic_inclusive_scan:
2639    case nir_intrinsic_exclusive_scan:
2640       dst[0] = emit_intrinsic_reduce(ctx, intr);
2641       break;
2642 
2643    case nir_intrinsic_preamble_end_ir3: {
2644       struct ir3_instruction *instr = ir3_SHPE(ctx->block);
2645       instr->barrier_class = instr->barrier_conflict = IR3_BARRIER_CONST_W;
2646       array_insert(b, b->keeps, instr);
2647       break;
2648    }
2649    case nir_intrinsic_store_uniform_ir3: {
2650       unsigned components = nir_src_num_components(intr->src[0]);
2651       unsigned dst = nir_intrinsic_base(intr);
2652       unsigned dst_lo = dst & 0xff;
2653       unsigned dst_hi = dst >> 8;
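      /* The destination offset is split: the low 8 bits are encoded
       * directly in the stc instruction, and any high bits are supplied
       * via a1.x below.
       */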
2654 
2655       struct ir3_instruction *src =
2656          ir3_create_collect(b, ir3_get_src(ctx, &intr->src[0]), components);
2657       struct ir3_instruction *a1 = NULL;
2658       if (dst_hi) {
2659          /* Encode only the high part of the destination in a1.x to increase the
2660           * chance that we can reuse the a1.x value in subsequent stc
2661           * instructions.
2662           */
2663          a1 = ir3_get_addr1(ctx, dst_hi << 8);
2664       }
2665 
2666       struct ir3_instruction *stc =
2667          ir3_STC(ctx->block, create_immed(b, dst_lo),  0, src, 0);
2668       stc->cat6.iim_val = components;
2669       stc->cat6.type = TYPE_U32;
2670       stc->barrier_conflict = IR3_BARRIER_CONST_W;
2671       if (a1) {
2672          ir3_instr_set_address(stc, a1);
2673          stc->flags |= IR3_INSTR_A1EN;
2674       }
2675       array_insert(b, b->keeps, stc);
2676       break;
2677    }
2678    default:
2679       ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
2680                         nir_intrinsic_infos[intr->intrinsic].name);
2681       break;
2682    }
2683 
2684    if (info->has_dest)
2685       ir3_put_dst(ctx, &intr->dest);
2686 }
2687 
2688 static void
2689 emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
2690 {
2691    struct ir3_instruction **dst =
2692       ir3_get_dst_ssa(ctx, &instr->def, instr->def.num_components);
2693    unsigned bit_size = ir3_bitsize(ctx, instr->def.bit_size);
2694 
2695    if (bit_size <= 8) {
2696       for (int i = 0; i < instr->def.num_components; i++)
2697          dst[i] = create_immed_typed(ctx->block, instr->value[i].u8, TYPE_U8);
2698    } else if (bit_size <= 16) {
2699       for (int i = 0; i < instr->def.num_components; i++)
2700          dst[i] = create_immed_typed(ctx->block, instr->value[i].u16, TYPE_U16);
2701    } else {
2702       for (int i = 0; i < instr->def.num_components; i++)
2703          dst[i] = create_immed_typed(ctx->block, instr->value[i].u32, TYPE_U32);
2704    }
2705 }
2706 
2707 static void
2708 emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
2709 {
2710    struct ir3_instruction **dst =
2711       ir3_get_dst_ssa(ctx, &undef->def, undef->def.num_components);
2712    type_t type = utype_for_size(ir3_bitsize(ctx, undef->def.bit_size));
2713 
2714    /* backend doesn't want undefined instructions, so just plug
2715     * in 0.0..
2716     */
2717    for (int i = 0; i < undef->def.num_components; i++)
2718       dst[i] = create_immed_typed(ctx->block, fui(0.0), type);
2719 }
2720 
2721 /*
2722  * texture fetch/sample instructions:
2723  */
2724 
2725 static type_t
2726 get_tex_dest_type(nir_tex_instr *tex)
2727 {
2728    type_t type;
2729 
2730    switch (tex->dest_type) {
2731    case nir_type_float32:
2732       return TYPE_F32;
2733    case nir_type_float16:
2734       return TYPE_F16;
2735    case nir_type_int32:
2736       return TYPE_S32;
2737    case nir_type_int16:
2738       return TYPE_S16;
2739    case nir_type_bool32:
2740    case nir_type_uint32:
2741       return TYPE_U32;
2742    case nir_type_bool16:
2743    case nir_type_uint16:
2744       return TYPE_U16;
2745    case nir_type_invalid:
2746    default:
2747       unreachable("bad dest_type");
2748    }
2749 
2750    return type;
2751 }
2752 
2753 static void
2754 tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
2755 {
2756    unsigned coords =
2757       glsl_get_sampler_dim_coordinate_components(tex->sampler_dim);
2758    unsigned flags = 0;
2759 
2760    /* note: would use tex->coord_components.. except txs.. also,
2761     * since array index goes after shadow ref, we don't want to
2762     * count it:
2763     */
2764    if (coords == 3)
2765       flags |= IR3_INSTR_3D;
2766 
2767    if (tex->is_shadow && tex->op != nir_texop_lod)
2768       flags |= IR3_INSTR_S;
2769 
2770    if (tex->is_array && tex->op != nir_texop_lod)
2771       flags |= IR3_INSTR_A;
2772 
2773    *flagsp = flags;
2774    *coordsp = coords;
2775 }
2776 
2777 /* Gets the sampler/texture idx as an hvec2, which could either be dynamic
2778  * or immediate (in which case it will get lowered later to a non-.s2en
2779  * version of the tex instruction which encodes tex/samp as immediates):
2780  */
2781 static struct tex_src_info
2782 get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
2783 {
2784    struct ir3_block *b = ctx->block;
2785    struct tex_src_info info = {0};
2786    int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
2787    int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
2788    struct ir3_instruction *texture, *sampler;
2789 
2790    if (texture_idx >= 0 || sampler_idx >= 0) {
2791       /* Bindless case */
2792       info.flags |= IR3_INSTR_B;
2793 
2794       if (tex->texture_non_uniform || tex->sampler_non_uniform)
2795          info.flags |= IR3_INSTR_NONUNIF;
2796 
2797       /* Gather information required to determine which encoding to
2798        * choose as well as for prefetch.
2799        */
2800       nir_intrinsic_instr *bindless_tex = NULL;
2801       bool tex_const;
2802       if (texture_idx >= 0) {
2803          ctx->so->bindless_tex = true;
2804          bindless_tex = ir3_bindless_resource(tex->src[texture_idx].src);
2805          assert(bindless_tex);
2806          info.tex_base = nir_intrinsic_desc_set(bindless_tex);
2807          tex_const = nir_src_is_const(bindless_tex->src[0]);
2808          if (tex_const)
2809             info.tex_idx = nir_src_as_uint(bindless_tex->src[0]);
2810       } else {
2811          /* To simplify some of the logic below, assume the index is
2812           * constant 0 when it's not enabled.
2813           */
2814          tex_const = true;
2815          info.tex_idx = 0;
2816       }
2817       nir_intrinsic_instr *bindless_samp = NULL;
2818       bool samp_const;
2819       if (sampler_idx >= 0) {
2820          ctx->so->bindless_samp = true;
2821          bindless_samp = ir3_bindless_resource(tex->src[sampler_idx].src);
2822          assert(bindless_samp);
2823          info.samp_base = nir_intrinsic_desc_set(bindless_samp);
2824          samp_const = nir_src_is_const(bindless_samp->src[0]);
2825          if (samp_const)
2826             info.samp_idx = nir_src_as_uint(bindless_samp->src[0]);
2827       } else {
2828          samp_const = true;
2829          info.samp_idx = 0;
2830       }
2831 
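      /* Three cases below: both indices are immediates small enough to
       * encode directly in the instruction, immediates that spill into
       * a1.x, or a fully dynamic .s2en lookup with the tex/samp pair
       * collected into a register.
       */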
2832       /* Choose encoding. */
2833       if (tex_const && samp_const && info.tex_idx < 256 &&
2834           info.samp_idx < 256) {
2835          if (info.tex_idx < 16 && info.samp_idx < 16 &&
2836              (!bindless_tex || !bindless_samp ||
2837               info.tex_base == info.samp_base)) {
2838             /* Everything fits within the instruction */
2839             info.base = info.tex_base;
2840          } else {
2841             info.base = info.tex_base;
2842             info.a1_val = info.tex_idx << 3 | info.samp_base;
2843             info.flags |= IR3_INSTR_A1EN;
2844          }
2845          info.samp_tex = NULL;
2846       } else {
2847          info.flags |= IR3_INSTR_S2EN;
2848          /* In the indirect case, we only use a1.x to store the sampler
2849           * base if it differs from the texture base.
2850           */
2851          if (!bindless_tex || !bindless_samp ||
2852              info.tex_base == info.samp_base) {
2853             info.base = info.tex_base;
2854          } else {
2855             info.base = info.tex_base;
2856             info.a1_val = info.samp_base;
2857             info.flags |= IR3_INSTR_A1EN;
2858          }
2859 
2860          /* Note: the indirect source is now a vec2 instead of hvec2, and
2861           * for some reason the texture and sampler are swapped.
2862           */
2863          struct ir3_instruction *texture, *sampler;
2864 
2865          if (bindless_tex) {
2866             texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
2867          } else {
2868             texture = create_immed(b, 0);
2869          }
2870 
2871          if (bindless_samp) {
2872             sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
2873          } else {
2874             sampler = create_immed(b, 0);
2875          }
2876          info.samp_tex = ir3_collect(b, texture, sampler);
2877       }
2878    } else {
2879       info.flags |= IR3_INSTR_S2EN;
2880       texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
2881       sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
2882       if (texture_idx >= 0) {
2883          texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
2884          texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
2885       } else {
2886          /* TODO what to do for dynamic case? I guess we only need the
2887           * max index for astc srgb workaround so maybe not a problem
2888           * to worry about if we don't enable indirect samplers for
2889           * a4xx?
2890           */
2891          ctx->max_texture_index =
2892             MAX2(ctx->max_texture_index, tex->texture_index);
2893          texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
2894          info.tex_idx = tex->texture_index;
2895       }
2896 
2897       if (sampler_idx >= 0) {
2898          sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
2899          sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
2900       } else {
2901          sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
2902          info.samp_idx = tex->sampler_index;
2903       }
2904 
2905       info.samp_tex = ir3_collect(b, sampler, texture);
2906    }
2907 
2908    return info;
2909 }
2910 
2911 static void
2912 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
2913 {
2914    struct ir3_block *b = ctx->block;
2915    struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
2916    struct ir3_instruction *const *coord, *const *off, *const *ddx, *const *ddy;
2917    struct ir3_instruction *lod, *compare, *proj, *sample_index;
2918    struct tex_src_info info = {0};
2919    bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
2920    unsigned i, coords, flags, ncomp;
2921    unsigned nsrc0 = 0, nsrc1 = 0;
2922    type_t type;
2923    opc_t opc = 0;
2924 
2925    ncomp = nir_dest_num_components(tex->dest);
2926 
2927    coord = off = ddx = ddy = NULL;
2928    lod = proj = compare = sample_index = NULL;
2929 
2930    dst = ir3_get_dst(ctx, &tex->dest, ncomp);
2931 
2932    for (unsigned i = 0; i < tex->num_srcs; i++) {
2933       switch (tex->src[i].src_type) {
2934       case nir_tex_src_coord:
2935          coord = ir3_get_src(ctx, &tex->src[i].src);
2936          break;
2937       case nir_tex_src_bias:
2938          lod = ir3_get_src(ctx, &tex->src[i].src)[0];
2939          has_bias = true;
2940          break;
2941       case nir_tex_src_lod:
2942          lod = ir3_get_src(ctx, &tex->src[i].src)[0];
2943          has_lod = true;
2944          break;
2945       case nir_tex_src_comparator: /* shadow comparator */
2946          compare = ir3_get_src(ctx, &tex->src[i].src)[0];
2947          break;
2948       case nir_tex_src_projector:
2949          proj = ir3_get_src(ctx, &tex->src[i].src)[0];
2950          has_proj = true;
2951          break;
2952       case nir_tex_src_offset:
2953          off = ir3_get_src(ctx, &tex->src[i].src);
2954          has_off = true;
2955          break;
2956       case nir_tex_src_ddx:
2957          ddx = ir3_get_src(ctx, &tex->src[i].src);
2958          break;
2959       case nir_tex_src_ddy:
2960          ddy = ir3_get_src(ctx, &tex->src[i].src);
2961          break;
2962       case nir_tex_src_ms_index:
2963          sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
2964          break;
2965       case nir_tex_src_texture_offset:
2966       case nir_tex_src_sampler_offset:
2967       case nir_tex_src_texture_handle:
2968       case nir_tex_src_sampler_handle:
2969          /* handled in get_tex_samp_src() */
2970          break;
2971       default:
2972          ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
2973                            tex->src[i].src_type);
2974          return;
2975       }
2976    }
2977 
2978    switch (tex->op) {
2979    case nir_texop_tex_prefetch:
2980       compile_assert(ctx, !has_bias);
2981       compile_assert(ctx, !has_lod);
2982       compile_assert(ctx, !compare);
2983       compile_assert(ctx, !has_proj);
2984       compile_assert(ctx, !has_off);
2985       compile_assert(ctx, !ddx);
2986       compile_assert(ctx, !ddy);
2987       compile_assert(ctx, !sample_index);
2988       compile_assert(
2989          ctx, nir_tex_instr_src_index(tex, nir_tex_src_texture_offset) < 0);
2990       compile_assert(
2991          ctx, nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset) < 0);
2992 
2993       if (ctx->so->num_sampler_prefetch < ctx->prefetch_limit) {
2994          opc = OPC_META_TEX_PREFETCH;
2995          ctx->so->num_sampler_prefetch++;
2996          break;
2997       }
2998       FALLTHROUGH;
2999    case nir_texop_tex:
3000       opc = has_lod ? OPC_SAML : OPC_SAM;
3001       break;
3002    case nir_texop_txb:
3003       opc = OPC_SAMB;
3004       break;
3005    case nir_texop_txl:
3006       opc = OPC_SAML;
3007       break;
3008    case nir_texop_txd:
3009       opc = OPC_SAMGQ;
3010       break;
3011    case nir_texop_txf:
3012       opc = OPC_ISAML;
3013       break;
3014    case nir_texop_lod:
3015       opc = OPC_GETLOD;
3016       break;
3017    case nir_texop_tg4:
3018       switch (tex->component) {
3019       case 0:
3020          opc = OPC_GATHER4R;
3021          break;
3022       case 1:
3023          opc = OPC_GATHER4G;
3024          break;
3025       case 2:
3026          opc = OPC_GATHER4B;
3027          break;
3028       case 3:
3029          opc = OPC_GATHER4A;
3030          break;
3031       }
3032       break;
3033    case nir_texop_txf_ms_fb:
3034    case nir_texop_txf_ms:
3035       opc = OPC_ISAMM;
3036       break;
3037    default:
3038       ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
3039       return;
3040    }
3041 
3042    tex_info(tex, &flags, &coords);
3043 
3044    /*
3045     * lay out the first argument in the proper order:
3046     *  - actual coordinates first
3047     *  - shadow reference
3048     *  - array index
3049     *  - projection w
3050     *  - starting at offset 4, dpdx.xy, dpdy.xy
3051     *
3052     * bias/lod go into the second arg
3053     */
3054 
3055    /* insert tex coords: */
3056    for (i = 0; i < coords; i++)
3057       src0[i] = coord[i];
3058 
3059    nsrc0 = i;
3060 
3061    /* scale up integer coords for TXF based on the LOD */
3062    if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
3063       assert(has_lod);
3064       for (i = 0; i < coords; i++)
3065          src0[i] = ir3_SHL_B(b, src0[i], 0, lod, 0);
3066    }
3067 
3068    if (coords == 1) {
3069       /* hw doesn't do 1d, so we treat it as 2d with
3070        * height of 1, and patch up the y coord.
3071        */
3072       if (is_isam(opc)) {
3073          src0[nsrc0++] = create_immed(b, 0);
3074       } else {
3075          src0[nsrc0++] = create_immed(b, fui(0.5));
3076       }
3077    }
3078 
3079    if (tex->is_shadow && tex->op != nir_texop_lod)
3080       src0[nsrc0++] = compare;
3081 
3082    if (tex->is_array && tex->op != nir_texop_lod) {
3083       struct ir3_instruction *idx = coord[coords];
3084 
3085       /* the array coord for cube arrays needs 0.5 added to it */
3086       if (ctx->compiler->array_index_add_half && !is_isam(opc))
3087          idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
3088 
3089       src0[nsrc0++] = idx;
3090    }
3091 
3092    if (has_proj) {
3093       src0[nsrc0++] = proj;
3094       flags |= IR3_INSTR_P;
3095    }
3096 
3097    /* pad to 4, then ddx/ddy: */
3098    if (tex->op == nir_texop_txd) {
3099       while (nsrc0 < 4)
3100          src0[nsrc0++] = create_immed(b, fui(0.0));
3101       for (i = 0; i < coords; i++)
3102          src0[nsrc0++] = ddx[i];
3103       if (coords < 2)
3104          src0[nsrc0++] = create_immed(b, fui(0.0));
3105       for (i = 0; i < coords; i++)
3106          src0[nsrc0++] = ddy[i];
3107       if (coords < 2)
3108          src0[nsrc0++] = create_immed(b, fui(0.0));
3109    }
3110 
3111    /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
3112     * with scaled x coord according to requested sample:
3113     */
3114    if (opc == OPC_ISAMM) {
3115       if (ctx->compiler->txf_ms_with_isaml) {
3116          /* the samples are laid out in x dimension as
3117           *     0 1 2 3
3118           * x_ms = (x << ms) + sample_index;
3119           */
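         /* For example, with 4x MSAA (so ms = 2 here), x = 3 and
          * sample_index = 1 give x_ms = (3 << 2) + 1 = 13.
          */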
3120          struct ir3_instruction *ms;
3121          ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
3122 
3123          src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
3124          src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
3125 
3126          opc = OPC_ISAML;
3127       } else {
3128          src0[nsrc0++] = sample_index;
3129       }
3130    }
3131 
3132    /*
3133     * second argument (if applicable):
3134     *  - offsets
3135     *  - lod
3136     *  - bias
3137     */
3138    if (has_off | has_lod | has_bias) {
3139       if (has_off) {
3140          unsigned off_coords = coords;
3141          if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
3142             off_coords--;
3143          for (i = 0; i < off_coords; i++)
3144             src1[nsrc1++] = off[i];
3145          if (off_coords < 2)
3146             src1[nsrc1++] = create_immed(b, fui(0.0));
3147          flags |= IR3_INSTR_O;
3148       }
3149 
3150       if (has_lod | has_bias)
3151          src1[nsrc1++] = lod;
3152    }
3153 
3154    type = get_tex_dest_type(tex);
3155 
3156    if (opc == OPC_GETLOD)
3157       type = TYPE_S32;
3158 
3159    if (tex->op == nir_texop_txf_ms_fb) {
3160       /* only expect a single txf_ms_fb per shader: */
3161       compile_assert(ctx, !ctx->so->fb_read);
3162       compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
3163 
3164       ctx->so->fb_read = true;
3165       info.samp_tex = ir3_collect(
3166          b, create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
3167          create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16));
3168       info.flags = IR3_INSTR_S2EN;
3169 
3170       ctx->so->num_samp++;
3171    } else {
3172       info = get_tex_samp_tex_src(ctx, tex);
3173    }
3174 
3175    bool tg4_swizzle_fixup = false;
3176    if (tex->op == nir_texop_tg4 && ctx->compiler->gen == 4 &&
3177          ctx->sampler_swizzles[tex->texture_index] != 0x688 /* rgba */) {
3178       uint16_t swizzles = ctx->sampler_swizzles[tex->texture_index];
3179       uint16_t swizzle = (swizzles >> (tex->component * 3)) & 7;
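      /* Each component's swizzle is 3 bits: values 0-3 select a fetched
       * channel (r/g/b/a), while values above 3 encode a constant
       * (4 -> 0, 5 -> 1), handled just below.
       */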
3180       if (swizzle > 3) {
3181          /* this would mean that we can just return 0 / 1, no texturing
3182           * necessary
3183           */
3184          struct ir3_instruction *imm = create_immed(b,
3185                type_float(type) ? fui(swizzle - 4) : (swizzle - 4));
3186          for (int i = 0; i < 4; i++)
3187             dst[i] = imm;
3188          ir3_put_dst(ctx, &tex->dest);
3189          return;
3190       }
3191       opc = OPC_GATHER4R + swizzle;
3192       tg4_swizzle_fixup = true;
3193    }
3194 
3195    struct ir3_instruction *col0 = ir3_create_collect(b, src0, nsrc0);
3196    struct ir3_instruction *col1 = ir3_create_collect(b, src1, nsrc1);
3197 
3198    if (opc == OPC_META_TEX_PREFETCH) {
3199       int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
3200 
3201       compile_assert(ctx, tex->src[idx].src.is_ssa);
3202 
3203       sam = ir3_SAM(ctx->in_block, opc, type, MASK(ncomp), 0, NULL,
3204                     get_barycentric(ctx, IJ_PERSP_PIXEL), 0);
3205       sam->prefetch.input_offset = ir3_nir_coord_offset(tex->src[idx].src.ssa);
3206       /* make sure not to add irrelevant flags like S2EN */
3207       sam->flags = flags | (info.flags & IR3_INSTR_B);
3208       sam->prefetch.tex = info.tex_idx;
3209       sam->prefetch.samp = info.samp_idx;
3210       sam->prefetch.tex_base = info.tex_base;
3211       sam->prefetch.samp_base = info.samp_base;
3212    } else {
3213       info.flags |= flags;
3214       sam = emit_sam(ctx, opc, info, type, MASK(ncomp), col0, col1);
3215    }
3216 
3217    if (tg4_swizzle_fixup) {
3218       /* TODO: fix-up for ASTC when alpha is selected? */
3219       array_insert(ctx->ir, ctx->ir->tg4, sam);
3220 
3221       ir3_split_dest(b, dst, sam, 0, 4);
3222 
3223       uint8_t tex_bits = ctx->sampler_swizzles[tex->texture_index] >> 12;
3224       if (!type_float(type) && tex_bits != 3 /* 32bpp */ &&
3225             tex_bits != 0 /* key unset */) {
3226          uint8_t bits = 0;
3227          switch (tex_bits) {
3228          case 1: /* 8bpp */
3229             bits = 8;
3230             break;
3231          case 2: /* 16bpp */
3232             bits = 16;
3233             break;
3234          case 4: /* 10bpp or 2bpp for alpha */
3235             if (opc == OPC_GATHER4A)
3236                bits = 2;
3237             else
3238                bits = 10;
3239             break;
3240          default:
3241             debug_assert(0);
3242          }
3243 
3244          sam->cat5.type = TYPE_F32;
3245          for (int i = 0; i < 4; i++) {
3246             /* scale and offset the unorm data */
3247             dst[i] = ir3_MAD_F32(b, dst[i], 0, create_immed(b, fui((1 << bits) - 1)), 0, create_immed(b, fui(0.5f)), 0);
3248             /* convert the scaled value to integer */
3249             dst[i] = ir3_COV(b, dst[i], TYPE_F32, TYPE_U32);
3250             /* sign extend for signed values */
3251             if (type == TYPE_S32) {
3252                dst[i] = ir3_SHL_B(b, dst[i], 0, create_immed(b, 32 - bits), 0);
3253                dst[i] = ir3_ASHR_B(b, dst[i], 0, create_immed(b, 32 - bits), 0);
3254             }
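            /* e.g. with bits = 8, a fetched unorm value of 200/255 becomes
             * 200.5 after the MAD, 200 after the (truncating) F32->U32
             * conversion, and -56 after the 8-bit sign extension for
             * signed formats.
             */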
3255          }
3256       }
3257    } else if ((ctx->astc_srgb & (1 << tex->texture_index)) &&
3258        tex->op != nir_texop_tg4 && /* leave out tg4, unless it's on alpha? */
3259        !nir_tex_instr_is_query(tex)) {
3260       assert(opc != OPC_META_TEX_PREFETCH);
3261 
3262       /* only need first 3 components: */
3263       sam->dsts[0]->wrmask = 0x7;
3264       ir3_split_dest(b, dst, sam, 0, 3);
3265 
3266       /* we need to sample the alpha separately with a non-SRGB
3267        * texture state:
3268        */
3269       sam = ir3_SAM(b, opc, type, 0b1000, flags | info.flags, info.samp_tex,
3270                     col0, col1);
3271 
3272       array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
3273 
3274       /* fixup .w component: */
3275       ir3_split_dest(b, &dst[3], sam, 3, 1);
3276    } else {
3277       /* normal (non-workaround) case: */
3278       ir3_split_dest(b, dst, sam, 0, ncomp);
3279    }
3280 
3281    /* GETLOD returns results in 4.8 fixed point */
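   /* e.g. a raw value of 384 (0x180) corresponds to 384 / 256 = 1.5 */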
3282    if (opc == OPC_GETLOD) {
3283       struct ir3_instruction *factor = create_immed(b, fui(1.0 / 256));
3284 
3285       compile_assert(ctx, tex->dest_type == nir_type_float32);
3286       for (i = 0; i < 2; i++) {
3287          dst[i] =
3288             ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_S32, TYPE_F32), 0, factor, 0);
3289       }
3290    }
3291 
3292    ir3_put_dst(ctx, &tex->dest);
3293 }
3294 
3295 static void
3296 emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx)
3297 {
3298    struct ir3_block *b = ctx->block;
3299    struct ir3_instruction **dst, *sam;
3300    type_t dst_type = get_tex_dest_type(tex);
3301    struct tex_src_info info = get_tex_samp_tex_src(ctx, tex);
3302 
3303    dst = ir3_get_dst(ctx, &tex->dest, 1);
3304 
3305    sam = emit_sam(ctx, OPC_GETINFO, info, dst_type, 1 << idx, NULL, NULL);
3306 
3307    /* even though there is only one component, since it ends
3308     * up in .y/.z/.w rather than .x, we need a split_dest()
3309     */
3310    ir3_split_dest(b, dst, sam, idx, 1);
3311 
3312    /* The # of levels comes from getinfo.z. We need to add 1 to it, since
3313     * the value in TEX_CONST_0 is zero-based.
3314     */
3315    if (ctx->compiler->levels_add_one)
3316       dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
3317 
3318    ir3_put_dst(ctx, &tex->dest);
3319 }
3320 
3321 static void
3322 emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
3323 {
3324    struct ir3_block *b = ctx->block;
3325    struct ir3_instruction **dst, *sam;
3326    struct ir3_instruction *lod;
3327    unsigned flags, coords;
3328    type_t dst_type = get_tex_dest_type(tex);
3329    struct tex_src_info info = get_tex_samp_tex_src(ctx, tex);
3330 
3331    tex_info(tex, &flags, &coords);
3332    info.flags |= flags;
3333 
3334    /* Actually we want the number of dimensions, not coordinates. This
3335     * distinction only matters for cubes.
3336     */
3337    if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
3338       coords = 2;
3339 
3340    dst = ir3_get_dst(ctx, &tex->dest, 4);
3341 
3342    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
3343    compile_assert(ctx, lod_idx >= 0);
3344 
3345    lod = ir3_get_src(ctx, &tex->src[lod_idx].src)[0];
3346 
3347    if (tex->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
3348       sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL);
3349    } else {
3350       /*
3351        * The maximum value which OPC_GETSIZE could return for one dimension
3352        * is 0x007ff0, however sampler buffer could be much bigger.
3353        * Blob uses OPC_GETBUF for them.
3354        */
3355       sam = emit_sam(ctx, OPC_GETBUF, info, dst_type, 0b1111, NULL, NULL);
3356    }
3357 
3358    ir3_split_dest(b, dst, sam, 0, 4);
3359 
3360    /* Array size actually ends up in .w rather than .z. This doesn't
3361     * matter for miplevel 0, but for higher mips the value in z is
3362     * minified whereas w stays. Also, the value in TEX_CONST_3_DEPTH is
3363     * returned, which means that we have to add 1 to it for arrays.
3364     */
3365    if (tex->is_array) {
3366       if (ctx->compiler->levels_add_one) {
3367          dst[coords] = ir3_ADD_U(b, dst[3], 0, create_immed(b, 1), 0);
3368       } else {
3369          dst[coords] = ir3_MOV(b, dst[3], TYPE_U32);
3370       }
3371    }
3372 
3373    ir3_put_dst(ctx, &tex->dest);
3374 }
3375 
3376 /* phi instructions are left partially constructed.  We don't resolve
3377  * their srcs until the end of the shader, since (e.g. with loops) one of
3378  * the phi's srcs might be defined after the phi due to back edges in
3379  * the CFG.
3380  */
3381 static void
3382 emit_phi(struct ir3_context *ctx, nir_phi_instr *nphi)
3383 {
3384    struct ir3_instruction *phi, **dst;
3385 
3386    /* NOTE: phi's should be lowered to scalar at this point */
3387    compile_assert(ctx, nphi->dest.ssa.num_components == 1);
3388 
3389    dst = ir3_get_dst(ctx, &nphi->dest, 1);
3390 
3391    phi = ir3_instr_create(ctx->block, OPC_META_PHI, 1,
3392                           exec_list_length(&nphi->srcs));
3393    __ssa_dst(phi);
3394    phi->phi.nphi = nphi;
3395 
3396    dst[0] = phi;
3397 
3398    ir3_put_dst(ctx, &nphi->dest);
3399 }
3400 
3401 static struct ir3_block *get_block(struct ir3_context *ctx,
3402                                    const nir_block *nblock);
3403 
3404 static struct ir3_instruction *
3405 read_phi_src(struct ir3_context *ctx, struct ir3_block *blk,
3406              struct ir3_instruction *phi, nir_phi_instr *nphi)
3407 {
3408    if (!blk->nblock) {
3409       struct ir3_instruction *continue_phi =
3410          ir3_instr_create(blk, OPC_META_PHI, 1, blk->predecessors_count);
3411       __ssa_dst(continue_phi)->flags = phi->dsts[0]->flags;
3412 
3413       for (unsigned i = 0; i < blk->predecessors_count; i++) {
3414          struct ir3_instruction *src =
3415             read_phi_src(ctx, blk->predecessors[i], phi, nphi);
3416          if (src)
3417             __ssa_src(continue_phi, src, 0);
3418          else
3419             ir3_src_create(continue_phi, INVALID_REG, phi->dsts[0]->flags);
3420       }
3421 
3422       return continue_phi;
3423    }
3424 
3425    nir_foreach_phi_src (nsrc, nphi) {
3426       if (blk->nblock == nsrc->pred) {
3427          if (nsrc->src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
3428             /* Create an ir3 undef */
3429             return NULL;
3430          } else {
3431             return ir3_get_src(ctx, &nsrc->src)[0];
3432          }
3433       }
3434    }
3435 
3436    unreachable("couldn't find phi node ir3 block");
3437    return NULL;
3438 }
3439 
3440 static void
3441 resolve_phis(struct ir3_context *ctx, struct ir3_block *block)
3442 {
3443    foreach_instr (phi, &block->instr_list) {
3444       if (phi->opc != OPC_META_PHI)
3445          break;
3446 
3447       nir_phi_instr *nphi = phi->phi.nphi;
3448 
3449       if (!nphi) /* skip continue phis created above */
3450          continue;
3451 
3452       for (unsigned i = 0; i < block->predecessors_count; i++) {
3453          struct ir3_block *pred = block->predecessors[i];
3454          struct ir3_instruction *src = read_phi_src(ctx, pred, phi, nphi);
3455          if (src) {
3456             __ssa_src(phi, src, 0);
3457          } else {
3458             /* Create an ir3 undef */
3459             ir3_src_create(phi, INVALID_REG, phi->dsts[0]->flags);
3460          }
3461       }
3462    }
3463 }
3464 
3465 static void
3466 emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
3467 {
3468    switch (jump->type) {
3469    case nir_jump_break:
3470    case nir_jump_continue:
3471    case nir_jump_return:
3472       /* I *think* we can simply just ignore this, and use the
3473        * successor block link to figure out where we need to
3474        * jump to for break/continue
3475        */
3476       break;
3477    default:
3478       ir3_context_error(ctx, "Unhandled NIR jump type: %d\n", jump->type);
3479       break;
3480    }
3481 }
3482 
3483 static void
3484 emit_instr(struct ir3_context *ctx, nir_instr *instr)
3485 {
3486    switch (instr->type) {
3487    case nir_instr_type_alu:
3488       emit_alu(ctx, nir_instr_as_alu(instr));
3489       break;
3490    case nir_instr_type_deref:
3491       /* ignored, handled as part of the intrinsic they are src to */
3492       break;
3493    case nir_instr_type_intrinsic:
3494       emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
3495       break;
3496    case nir_instr_type_load_const:
3497       emit_load_const(ctx, nir_instr_as_load_const(instr));
3498       break;
3499    case nir_instr_type_ssa_undef:
3500       emit_undef(ctx, nir_instr_as_ssa_undef(instr));
3501       break;
3502    case nir_instr_type_tex: {
3503       nir_tex_instr *tex = nir_instr_as_tex(instr);
3504       /* a couple of tex instructions get special-cased:
3505        */
3506       switch (tex->op) {
3507       case nir_texop_txs:
3508          emit_tex_txs(ctx, tex);
3509          break;
3510       case nir_texop_query_levels:
3511          emit_tex_info(ctx, tex, 2);
3512          break;
3513       case nir_texop_texture_samples:
3514          emit_tex_info(ctx, tex, 3);
3515          break;
3516       default:
3517          emit_tex(ctx, tex);
3518          break;
3519       }
3520       break;
3521    }
3522    case nir_instr_type_jump:
3523       emit_jump(ctx, nir_instr_as_jump(instr));
3524       break;
3525    case nir_instr_type_phi:
3526       emit_phi(ctx, nir_instr_as_phi(instr));
3527       break;
3528    case nir_instr_type_call:
3529    case nir_instr_type_parallel_copy:
3530       ir3_context_error(ctx, "Unhandled NIR instruction type: %d\n",
3531                         instr->type);
3532       break;
3533    }
3534 }
3535 
3536 static struct ir3_block *
3537 get_block(struct ir3_context *ctx, const nir_block *nblock)
3538 {
3539    struct ir3_block *block;
3540    struct hash_entry *hentry;
3541 
3542    hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
3543    if (hentry)
3544       return hentry->data;
3545 
3546    block = ir3_block_create(ctx->ir);
3547    block->nblock = nblock;
3548    _mesa_hash_table_insert(ctx->block_ht, nblock, block);
3549 
3550    return block;
3551 }
3552 
3553 static struct ir3_block *
3554 get_block_or_continue(struct ir3_context *ctx, const nir_block *nblock)
3555 {
3556    struct hash_entry *hentry;
3557 
3558    hentry = _mesa_hash_table_search(ctx->continue_block_ht, nblock);
3559    if (hentry)
3560       return hentry->data;
3561 
3562    return get_block(ctx, nblock);
3563 }
3564 
3565 static struct ir3_block *
3566 create_continue_block(struct ir3_context *ctx, const nir_block *nblock)
3567 {
3568    struct ir3_block *block = ir3_block_create(ctx->ir);
3569    block->nblock = NULL;
3570    _mesa_hash_table_insert(ctx->continue_block_ht, nblock, block);
3571    return block;
3572 }
3573 
3574 static void
3575 emit_block(struct ir3_context *ctx, nir_block *nblock)
3576 {
3577    ctx->block = get_block(ctx, nblock);
3578 
3579    list_addtail(&ctx->block->node, &ctx->ir->block_list);
3580 
3581    ctx->block->loop_id = ctx->loop_id;
3582    ctx->block->loop_depth = ctx->loop_depth;
3583 
3584    /* re-emit addr register in each block if needed: */
3585    for (int i = 0; i < ARRAY_SIZE(ctx->addr0_ht); i++) {
3586       _mesa_hash_table_destroy(ctx->addr0_ht[i], NULL);
3587       ctx->addr0_ht[i] = NULL;
3588    }
3589 
3590    _mesa_hash_table_u64_destroy(ctx->addr1_ht);
3591    ctx->addr1_ht = NULL;
3592 
3593    nir_foreach_instr (instr, nblock) {
3594       ctx->cur_instr = instr;
3595       emit_instr(ctx, instr);
3596       ctx->cur_instr = NULL;
3597       if (ctx->error)
3598          return;
3599    }
3600 
3601    for (int i = 0; i < ARRAY_SIZE(ctx->block->successors); i++) {
3602       if (nblock->successors[i]) {
3603          ctx->block->successors[i] =
3604             get_block_or_continue(ctx, nblock->successors[i]);
3605          ctx->block->physical_successors[i] = ctx->block->successors[i];
3606       }
3607    }
3608 
3609    _mesa_hash_table_clear(ctx->sel_cond_conversions, NULL);
3610 }
3611 
3612 static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list);
3613 
3614 static void
3615 emit_if(struct ir3_context *ctx, nir_if *nif)
3616 {
3617    struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0];
3618 
3619    if (condition->opc == OPC_ANY_MACRO && condition->block == ctx->block) {
3620       ctx->block->condition = ssa(condition->srcs[0]);
3621       ctx->block->brtype = IR3_BRANCH_ANY;
3622    } else if (condition->opc == OPC_ALL_MACRO &&
3623               condition->block == ctx->block) {
3624       ctx->block->condition = ssa(condition->srcs[0]);
3625       ctx->block->brtype = IR3_BRANCH_ALL;
3626    } else if (condition->opc == OPC_ELECT_MACRO &&
3627               condition->block == ctx->block) {
3628       ctx->block->condition = NULL;
3629       ctx->block->brtype = IR3_BRANCH_GETONE;
3630    } else if (condition->opc == OPC_SHPS_MACRO &&
3631               condition->block == ctx->block) {
3632       /* TODO: technically this only works if the block is the only user of the
3633        * shps, but we only use it in very constrained scenarios so this should
3634        * be ok.
3635        */
3636       ctx->block->condition = NULL;
3637       ctx->block->brtype = IR3_BRANCH_SHPS;
3638    } else {
3639       ctx->block->condition = ir3_get_predicate(ctx, condition);
3640       ctx->block->brtype = IR3_BRANCH_COND;
3641    }
3642 
3643    emit_cf_list(ctx, &nif->then_list);
3644    emit_cf_list(ctx, &nif->else_list);
3645 
3646    struct ir3_block *last_then = get_block(ctx, nir_if_last_then_block(nif));
3647    struct ir3_block *first_else = get_block(ctx, nir_if_first_else_block(nif));
3648    assert(last_then->physical_successors[0] &&
3649           !last_then->physical_successors[1]);
3650    last_then->physical_successors[1] = first_else;
3651 
3652    struct ir3_block *last_else = get_block(ctx, nir_if_last_else_block(nif));
3653    struct ir3_block *after_if =
3654       get_block(ctx, nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));
3655    assert(last_else->physical_successors[0] &&
3656           !last_else->physical_successors[1]);
3657    if (after_if != last_else->physical_successors[0])
3658       last_else->physical_successors[1] = after_if;
3659 }
3660 
3661 static void
3662 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
3663 {
3664    unsigned old_loop_id = ctx->loop_id;
3665    ctx->loop_id = ctx->so->loops + 1;
3666    ctx->loop_depth++;
3667 
3668    struct nir_block *nstart = nir_loop_first_block(nloop);
3669    struct ir3_block *continue_blk = NULL;
3670 
3671    /* There's always one incoming edge from outside the loop, and if there
3672     * is more than one backedge from inside the loop (so more than two total
3673     * edges) then we need to create a continue block after the loop to ensure
3674     * that control reconverges at the end of each loop iteration.
3675     */
3676    if (nstart->predecessors->entries > 2) {
3677       continue_blk = create_continue_block(ctx, nstart);
3678    }
3679 
3680    emit_cf_list(ctx, &nloop->body);
3681 
3682    if (continue_blk) {
3683       struct ir3_block *start = get_block(ctx, nstart);
3684       continue_blk->successors[0] = start;
3685       continue_blk->physical_successors[0] = start;
3686       continue_blk->loop_id = ctx->loop_id;
3687       continue_blk->loop_depth = ctx->loop_depth;
3688       list_addtail(&continue_blk->node, &ctx->ir->block_list);
3689    }
3690 
3691    ctx->so->loops++;
3692    ctx->loop_depth--;
3693    ctx->loop_id = old_loop_id;
3694 }
3695 
3696 static void
3697 stack_push(struct ir3_context *ctx)
3698 {
3699    ctx->stack++;
3700    ctx->max_stack = MAX2(ctx->max_stack, ctx->stack);
3701 }
3702 
3703 static void
3704 stack_pop(struct ir3_context *ctx)
3705 {
3706    compile_assert(ctx, ctx->stack > 0);
3707    ctx->stack--;
3708 }
3709 
3710 static void
3711 emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
3712 {
3713    foreach_list_typed (nir_cf_node, node, node, list) {
3714       switch (node->type) {
3715       case nir_cf_node_block:
3716          emit_block(ctx, nir_cf_node_as_block(node));
3717          break;
3718       case nir_cf_node_if:
3719          stack_push(ctx);
3720          emit_if(ctx, nir_cf_node_as_if(node));
3721          stack_pop(ctx);
3722          break;
3723       case nir_cf_node_loop:
3724          stack_push(ctx);
3725          emit_loop(ctx, nir_cf_node_as_loop(node));
3726          stack_pop(ctx);
3727          break;
3728       case nir_cf_node_function:
3729          ir3_context_error(ctx, "TODO\n");
3730          break;
3731       }
3732    }
3733 }
3734 
3735 /* emit stream-out code.  At this point, the current block is the original
3736  * (nir) end block, and nir ensures that all flow control paths terminate
3737  * into the end block.  We re-purpose the original end block to generate
3738  * the 'if (vtxcnt < maxvtxcnt)' condition, then append the conditional
3739  * block holding stream-out write instructions, followed by the new end
3740  * block:
3741  *
3742  *   blockOrigEnd {
3743  *      p0.x = (vtxcnt < maxvtxcnt)
3744  *      // succs: blockStreamOut, blockNewEnd
3745  *   }
3746  *   blockStreamOut {
3747  *      // preds: blockOrigEnd
3748  *      ... stream-out instructions ...
3749  *      // succs: blockNewEnd
3750  *   }
3751  *   blockNewEnd {
3752  *      // preds: blockOrigEnd, blockStreamOut
3753  *   }
3754  */
3755 static void
3756 emit_stream_out(struct ir3_context *ctx)
3757 {
3758    struct ir3 *ir = ctx->ir;
3759    struct ir3_stream_output_info *strmout = &ctx->so->shader->stream_output;
3760    struct ir3_block *orig_end_block, *stream_out_block, *new_end_block;
3761    struct ir3_instruction *vtxcnt, *maxvtxcnt, *cond;
3762    struct ir3_instruction *bases[IR3_MAX_SO_BUFFERS];
3763 
3764    /* create vtxcnt input in input block at top of shader,
3765     * so that it is seen as live over the entire duration
3766     * of the shader:
3767     */
3768    vtxcnt = create_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, 0x1);
3769    maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
3770 
3771    /* at this point, we are at the original 'end' block,
3772     * re-purpose this block to stream-out condition, then
3773     * append stream-out block and new-end block
3774     */
3775    orig_end_block = ctx->block;
3776 
3777    // maybe w/ store_global intrinsic, we could do this
3778    // stuff in nir->nir pass
3779 
3780    stream_out_block = ir3_block_create(ir);
3781    list_addtail(&stream_out_block->node, &ir->block_list);
3782 
3783    new_end_block = ir3_block_create(ir);
3784    list_addtail(&new_end_block->node, &ir->block_list);
3785 
3786    orig_end_block->successors[0] = stream_out_block;
3787    orig_end_block->successors[1] = new_end_block;
3788 
3789    orig_end_block->physical_successors[0] = stream_out_block;
3790    orig_end_block->physical_successors[1] = new_end_block;
3791 
3792    stream_out_block->successors[0] = new_end_block;
3793 
3794    stream_out_block->physical_successors[0] = new_end_block;
3795 
3796    /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
3797    cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
3798    cond->dsts[0]->num = regid(REG_P0, 0);
3799    cond->dsts[0]->flags &= ~IR3_REG_SSA;
3800    cond->cat2.condition = IR3_COND_LT;
3801 
3802    /* condition goes on previous block to the conditional,
3803     * since it is used to pick which of the two successor
3804     * paths to take:
3805     */
3806    orig_end_block->condition = cond;
3807 
3808    /* switch to stream_out_block to generate the stream-out
3809     * instructions:
3810     */
3811    ctx->block = stream_out_block;
3812 
3813    /* Calculate base addresses based on vtxcnt.  Instructions
3814     * generated for bases not used in following loop will be
3815     * stripped out in the backend.
3816     */
3817    for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
3818       const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
3819       unsigned stride = strmout->stride[i];
3820       struct ir3_instruction *base, *off;
3821 
3822       base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i));
3823 
3824       /* 24-bit should be enough: */
3825       off = ir3_MUL_U24(ctx->block, vtxcnt, 0,
3826                         create_immed(ctx->block, stride * 4), 0);
3827 
3828       bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0);
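      /* i.e. bases[i] = (tfbo uniform for buffer i) plus
       * vtxcnt * stride * 4 bytes into the buffer.
       */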
3829    }
3830 
3831    /* Generate the per-output store instructions: */
3832    for (unsigned i = 0; i < strmout->num_outputs; i++) {
3833       for (unsigned j = 0; j < strmout->output[i].num_components; j++) {
3834          unsigned c = j + strmout->output[i].start_component;
3835          struct ir3_instruction *base, *out, *stg;
3836 
3837          base = bases[strmout->output[i].output_buffer];
3838          out = ctx->outputs[regid(strmout->output[i].register_index, c)];
3839 
3840          stg = ir3_STG(
3841             ctx->block, base, 0,
3842             create_immed(ctx->block, (strmout->output[i].dst_offset + j) * 4),
3843             0, out, 0, create_immed(ctx->block, 1), 0);
3844          stg->cat6.type = TYPE_U32;
3845 
3846          array_insert(ctx->block, ctx->block->keeps, stg);
3847       }
3848    }
3849 
3850    /* and finally switch to the new_end_block: */
3851    ctx->block = new_end_block;
3852 }
3853 
3854 static void
3855 setup_predecessors(struct ir3 *ir)
3856 {
3857    foreach_block (block, &ir->block_list) {
3858       for (int i = 0; i < ARRAY_SIZE(block->successors); i++) {
3859          if (block->successors[i])
3860             ir3_block_add_predecessor(block->successors[i], block);
3861          if (block->physical_successors[i])
3862             ir3_block_add_physical_predecessor(block->physical_successors[i],
3863                                                block);
3864       }
3865    }
3866 }
3867 
3868 static void
3869 emit_function(struct ir3_context *ctx, nir_function_impl *impl)
3870 {
3871    nir_metadata_require(impl, nir_metadata_block_index);
3872 
3873    compile_assert(ctx, ctx->stack == 0);
3874 
3875    emit_cf_list(ctx, &impl->body);
3876    emit_block(ctx, impl->end_block);
3877 
3878    compile_assert(ctx, ctx->stack == 0);
3879 
3880    /* at this point, we should have a single empty block,
3881     * into which we emit the 'end' instruction.
3882     */
3883    compile_assert(ctx, list_is_empty(&ctx->block->instr_list));
3884 
3885    /* If stream-out (aka transform-feedback) is enabled, emit the
3886     * stream-out instructions, followed by a new empty block (into
3887     * which the 'end' instruction lands).
3888     *
3889     * NOTE: it is done in this order, rather than inserting before
3890     * we emit end_block, because NIR guarantees that all blocks
3891     * flow into end_block, and that end_block has no successors.
3892     * So by re-purposing end_block as the first block of stream-
3893     * out, we guarantee that all exit paths flow into the stream-
3894     * out instructions.
3895     */
3896    if ((ctx->compiler->gen < 5) &&
3897        (ctx->so->shader->stream_output.num_outputs > 0) &&
3898        !ctx->so->binning_pass) {
3899       debug_assert(ctx->so->type == MESA_SHADER_VERTEX);
3900       emit_stream_out(ctx);
3901    }
3902 
3903    setup_predecessors(ctx->ir);
3904    foreach_block (block, &ctx->ir->block_list) {
3905       resolve_phis(ctx, block);
3906    }
3907 }
3908 
3909 static void
3910 setup_input(struct ir3_context *ctx, nir_intrinsic_instr *intr)
3911 {
3912    struct ir3_shader_variant *so = ctx->so;
3913    struct ir3_instruction *coord = NULL;
3914 
3915    if (intr->intrinsic == nir_intrinsic_load_interpolated_input)
3916       coord = ir3_create_collect(ctx->block, ir3_get_src(ctx, &intr->src[0]), 2);
3917 
3918    compile_assert(ctx, nir_src_is_const(intr->src[coord ? 1 : 0]));
3919 
3920    unsigned frac = nir_intrinsic_component(intr);
3921    unsigned offset = nir_src_as_uint(intr->src[coord ? 1 : 0]);
3922    unsigned ncomp = nir_intrinsic_dest_components(intr);
3923    unsigned n = nir_intrinsic_base(intr) + offset;
3924    unsigned slot = nir_intrinsic_io_semantics(intr).location + offset;
3925    unsigned compmask;
3926 
3927    /* Inputs are loaded using ldlw or ldg for other stages. */
3928    compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT ||
3929                           ctx->so->type == MESA_SHADER_VERTEX);
3930 
3931    if (ctx->so->type == MESA_SHADER_FRAGMENT)
3932       compmask = BITFIELD_MASK(ncomp) << frac;
3933    else
3934       compmask = BITFIELD_MASK(ncomp + frac);
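   /* e.g. a vec2 load at component 1 (frac = 1, ncomp = 2) gives
    * compmask 0b0110 for the FS case and 0b0111 for the VS case.
    */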
3935 
3936    /* for a4xx+ rasterflat */
3937    if (so->inputs[n].rasterflat && ctx->so->key.rasterflat)
3938       coord = NULL;
3939 
3940    so->total_in += util_bitcount(compmask & ~so->inputs[n].compmask);
3941 
3942    so->inputs[n].slot = slot;
3943    so->inputs[n].compmask |= compmask;
3944    so->inputs_count = MAX2(so->inputs_count, n + 1);
3945    compile_assert(ctx, so->inputs_count < ARRAY_SIZE(so->inputs));
3946    so->inputs[n].flat = !coord;
3947 
3948    if (ctx->so->type == MESA_SHADER_FRAGMENT) {
3949       compile_assert(ctx, slot != VARYING_SLOT_POS);
3950 
3951       so->inputs[n].bary = true;
3952 
3953       for (int i = 0; i < ncomp; i++) {
3954          unsigned idx = (n * 4) + i + frac;
3955          ctx->last_dst[i] = create_frag_input(ctx, coord, idx);
3956       }
3957    } else {
3958       struct ir3_instruction *input = NULL;
3959 
3960       foreach_input (in, ctx->ir) {
3961          if (in->input.inidx == n) {
3962             input = in;
3963             break;
3964          }
3965       }
3966 
3967       if (!input) {
3968          input = create_input(ctx, compmask);
3969          input->input.inidx = n;
3970       } else {
3971          /* For aliased inputs, just append to the wrmask, i.e. if we
3972           * first see a vec2 at slot N, and then later a vec4, the
3973           * wrmask of the resulting overlapped vec2 and vec4 is 0xf.
3974           */
3975          input->dsts[0]->wrmask |= compmask;
3976       }
3977 
3978       for (int i = 0; i < ncomp + frac; i++) {
3979          unsigned idx = (n * 4) + i;
3980          compile_assert(ctx, idx < ctx->ninputs);
3981 
3982          /* fixup the src wrmask to avoid validation fail */
3983          if (ctx->inputs[idx] && (ctx->inputs[idx] != input)) {
3984             ctx->inputs[idx]->srcs[0]->wrmask = input->dsts[0]->wrmask;
3985             continue;
3986          }
3987 
3988          ir3_split_dest(ctx->block, &ctx->inputs[idx], input, i, 1);
3989       }
3990 
3991       for (int i = 0; i < ncomp; i++) {
3992          unsigned idx = (n * 4) + i + frac;
3993          ctx->last_dst[i] = ctx->inputs[idx];
3994       }
3995    }
3996 }
3997 
3998 /* Initially we assign non-packed inloc's for varyings, as we don't really
3999  * know up-front which components will be unused.  After all the compilation
4000  * stages we scan the shader to see which components are actually used, and
4001  * re-pack the inlocs to eliminate unneeded varyings.
4002  */
4003 static void
4004 pack_inlocs(struct ir3_context *ctx)
4005 {
4006    struct ir3_shader_variant *so = ctx->so;
4007    uint8_t used_components[so->inputs_count];
4008 
4009    memset(used_components, 0, sizeof(used_components));
4010 
4011    /*
4012     * First Step: scan shader to find which bary.f/ldlv remain:
4013     */
4014 
4015    foreach_block (block, &ctx->ir->block_list) {
4016       foreach_instr (instr, &block->instr_list) {
4017          if (is_input(instr)) {
4018             unsigned inloc = instr->srcs[0]->iim_val;
4019             unsigned i = inloc / 4;
4020             unsigned j = inloc % 4;
4021 
4022             compile_assert(ctx, instr->srcs[0]->flags & IR3_REG_IMMED);
4023             compile_assert(ctx, i < so->inputs_count);
4024 
4025             used_components[i] |= 1 << j;
4026          } else if (instr->opc == OPC_META_TEX_PREFETCH) {
4027             for (int n = 0; n < 2; n++) {
4028                unsigned inloc = instr->prefetch.input_offset + n;
4029                unsigned i = inloc / 4;
4030                unsigned j = inloc % 4;
4031 
4032                compile_assert(ctx, i < so->inputs_count);
4033 
4034                used_components[i] |= 1 << j;
4035             }
4036          }
4037       }
4038    }
4039 
4040    /*
4041     * Second Step: reassign varying inloc/slots:
4042     */
4043 
4044    unsigned inloc = 0;
4045 
4046    /* for clip+cull distances, unused components can't be eliminated because
4047     * they're read by fixed-function, even if there's a hole.  Note that
4048     * clip/cull distance arrays must be declared in the FS, so we can just
4049     * use the NIR clip/cull distances to avoid reading ucp_enables in the
4050     * shader key.
4051     */
4052    unsigned clip_cull_mask = so->clip_mask | so->cull_mask;
4053 
4054    for (unsigned i = 0; i < so->inputs_count; i++) {
4055       unsigned compmask = 0, maxcomp = 0;
4056 
4057       so->inputs[i].inloc = inloc;
4058       so->inputs[i].bary = false;
4059 
4060       if (so->inputs[i].slot == VARYING_SLOT_CLIP_DIST0 ||
4061           so->inputs[i].slot == VARYING_SLOT_CLIP_DIST1) {
4062          if (so->inputs[i].slot == VARYING_SLOT_CLIP_DIST0)
4063             compmask = clip_cull_mask & 0xf;
4064          else
4065             compmask = clip_cull_mask >> 4;
4066          used_components[i] = compmask;
4067       }
4068 
4069       for (unsigned j = 0; j < 4; j++) {
4070          if (!(used_components[i] & (1 << j)))
4071             continue;
4072 
4073          compmask |= (1 << j);
4074          maxcomp = j + 1;
4075 
4076          /* at this point, since used_components[i] mask is only
4077           * considering varyings (ie. not sysvals) we know this
4078           * is a varying:
4079           */
4080          so->inputs[i].bary = true;
4081       }
4082 
4083       if (so->inputs[i].bary) {
4084          so->varying_in++;
4085          so->inputs[i].compmask = (1 << maxcomp) - 1;
4086          inloc += maxcomp;
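         /* e.g. if only .x and .z of this input survive, maxcomp is 3, so
          * it gets compmask 0b0111 and advances inloc by three components.
          */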
4087       }
4088    }
4089 
4090    /*
4091     * Third Step: reassign packed inloc's:
4092     */
4093 
4094    foreach_block (block, &ctx->ir->block_list) {
4095       foreach_instr (instr, &block->instr_list) {
4096          if (is_input(instr)) {
4097             unsigned inloc = instr->srcs[0]->iim_val;
4098             unsigned i = inloc / 4;
4099             unsigned j = inloc % 4;
4100 
4101             instr->srcs[0]->iim_val = so->inputs[i].inloc + j;
4102          } else if (instr->opc == OPC_META_TEX_PREFETCH) {
4103             unsigned i = instr->prefetch.input_offset / 4;
4104             unsigned j = instr->prefetch.input_offset % 4;
4105             instr->prefetch.input_offset = so->inputs[i].inloc + j;
4106          }
4107       }
4108    }
4109 }
4110 
4111 static void
4112 setup_output(struct ir3_context *ctx, nir_intrinsic_instr *intr)
4113 {
4114    struct ir3_shader_variant *so = ctx->so;
4115    nir_io_semantics io = nir_intrinsic_io_semantics(intr);
4116 
4117    compile_assert(ctx, nir_src_is_const(intr->src[1]));
4118 
4119    unsigned offset = nir_src_as_uint(intr->src[1]);
4120    unsigned n = nir_intrinsic_base(intr) + offset;
4121    unsigned frac = nir_intrinsic_component(intr);
4122    unsigned ncomp = nir_intrinsic_src_components(intr, 0);
4123 
4124    /* For per-view variables, each user-facing slot corresponds to multiple
4125     * views, each with a corresponding driver_location, and the offset is for
4126     * the driver_location. To properly figure out the slot, we'd need to
4127     * plumb through the number of views. However, for now we only use
4128     * per-view with gl_Position, so we assume that the variable is not an
4129     * array or matrix (so there are no indirect accesses to the variable
4130     * itself) and the indirect offset corresponds to the view.
4131     */
4132    unsigned slot = io.location + (io.per_view ? 0 : offset);
4133 
4134    if (ctx->so->type == MESA_SHADER_FRAGMENT) {
4135       switch (slot) {
4136       case FRAG_RESULT_DEPTH:
4137          so->writes_pos = true;
4138          break;
4139       case FRAG_RESULT_COLOR:
4140          if (!ctx->s->info.fs.color_is_dual_source) {
4141             so->color0_mrt = 1;
4142          } else {
4143             slot = FRAG_RESULT_DATA0 + io.dual_source_blend_index;
4144          }
4145          break;
4146       case FRAG_RESULT_SAMPLE_MASK:
4147          so->writes_smask = true;
4148          break;
4149       case FRAG_RESULT_STENCIL:
4150          so->writes_stencilref = true;
4151          break;
4152       default:
4153          slot += io.dual_source_blend_index; /* For dual-src blend */
4154          if (slot >= FRAG_RESULT_DATA0)
4155             break;
4156          ir3_context_error(ctx, "unknown FS output name: %s\n",
4157                            gl_frag_result_name(slot));
4158       }
4159    } else if (ctx->so->type == MESA_SHADER_VERTEX ||
4160               ctx->so->type == MESA_SHADER_TESS_EVAL ||
4161               ctx->so->type == MESA_SHADER_GEOMETRY) {
4162       switch (slot) {
4163       case VARYING_SLOT_POS:
4164          so->writes_pos = true;
4165          break;
4166       case VARYING_SLOT_PSIZ:
4167          so->writes_psize = true;
4168          break;
4169       case VARYING_SLOT_PRIMITIVE_ID:
4170       case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
4171          debug_assert(ctx->so->type == MESA_SHADER_GEOMETRY);
4172          FALLTHROUGH;
4173       case VARYING_SLOT_COL0:
4174       case VARYING_SLOT_COL1:
4175       case VARYING_SLOT_BFC0:
4176       case VARYING_SLOT_BFC1:
4177       case VARYING_SLOT_FOGC:
4178       case VARYING_SLOT_CLIP_DIST0:
4179       case VARYING_SLOT_CLIP_DIST1:
4180       case VARYING_SLOT_CLIP_VERTEX:
4181       case VARYING_SLOT_LAYER:
4182       case VARYING_SLOT_VIEWPORT:
4183          break;
4184       default:
4185          if (slot >= VARYING_SLOT_VAR0)
4186             break;
4187          if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
4188             break;
4189          ir3_context_error(ctx, "unknown %s shader output name: %s\n",
4190                            _mesa_shader_stage_to_string(ctx->so->type),
4191                            gl_varying_slot_name_for_stage(slot, ctx->so->type));
4192       }
4193    } else {
4194       ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
4195    }
4196 
4197    so->outputs_count = MAX2(so->outputs_count, n + 1);
4198    compile_assert(ctx, so->outputs_count <= ARRAY_SIZE(so->outputs));
4199 
4200    so->outputs[n].slot = slot;
4201    if (io.per_view)
4202       so->outputs[n].view = offset;
4203 
4204    for (int i = 0; i < ncomp; i++) {
4205       unsigned idx = (n * 4) + i + frac;
4206       compile_assert(ctx, idx < ctx->noutputs);
4207       ctx->outputs[idx] = create_immed(ctx->block, fui(0.0));
4208    }
4209 
4210    /* if varying packing doesn't happen, we could end up in a situation
4211     * with "holes" in the output, and since the per-generation code that
4212     * sets up varying linkage registers doesn't expect to have more than
4213     * one varying per vec4 slot, pad the holes.
4214     *
4215     * Note that this should probably generate a performance warning of
4216     * some sort.
4217     */
4218    for (int i = 0; i < frac; i++) {
4219       unsigned idx = (n * 4) + i;
4220       if (!ctx->outputs[idx]) {
4221          ctx->outputs[idx] = create_immed(ctx->block, fui(0.0));
4222       }
4223    }
4224 
4225    struct ir3_instruction *const *src = ir3_get_src(ctx, &intr->src[0]);
4226    for (int i = 0; i < ncomp; i++) {
4227       unsigned idx = (n * 4) + i + frac;
4228       ctx->outputs[idx] = src[i];
4229    }
4230 }
4231 
4232 static bool
4233 uses_load_input(struct ir3_shader_variant *so)
4234 {
4235    return so->type == MESA_SHADER_VERTEX || so->type == MESA_SHADER_FRAGMENT;
4236 }
4237 
4238 static bool
4239 uses_store_output(struct ir3_shader_variant *so)
4240 {
4241    switch (so->type) {
4242    case MESA_SHADER_VERTEX:
4243       return !so->key.has_gs && !so->key.tessellation;
4244    case MESA_SHADER_TESS_EVAL:
4245       return !so->key.has_gs;
4246    case MESA_SHADER_GEOMETRY:
4247    case MESA_SHADER_FRAGMENT:
4248       return true;
4249    case MESA_SHADER_TESS_CTRL:
4250    case MESA_SHADER_COMPUTE:
4251    case MESA_SHADER_KERNEL:
4252       return false;
4253    default:
4254       unreachable("unknown stage");
4255    }
4256 }
4257 
4258 static void
4259 emit_instructions(struct ir3_context *ctx)
4260 {
4261    nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
4262 
4263    /* some varying setup which can't be done in setup_input(): */
4264    if (ctx->so->type == MESA_SHADER_FRAGMENT) {
4265       nir_foreach_shader_in_variable (var, ctx->s) {
4266          /* if any varyings have the 'sample' qualifier, that triggers us
4267           * to run in per-sample mode:
4268           */
4269          if (var->data.sample)
4270             ctx->so->per_samp = true;
4271 
4272          /* set rasterflat flag for front/back color */
4273          if (var->data.interpolation == INTERP_MODE_NONE) {
4274             switch (var->data.location) {
4275             case VARYING_SLOT_COL0:
4276             case VARYING_SLOT_COL1:
4277             case VARYING_SLOT_BFC0:
4278             case VARYING_SLOT_BFC1:
4279                ctx->so->inputs[var->data.driver_location].rasterflat = true;
4280                break;
4281             default:
4282                break;
4283             }
4284          }
4285       }
4286    }
4287 
4288    if (uses_load_input(ctx->so)) {
4289       ctx->so->inputs_count = ctx->s->num_inputs;
4290       compile_assert(ctx, ctx->so->inputs_count < ARRAY_SIZE(ctx->so->inputs));
4291       ctx->ninputs = ctx->s->num_inputs * 4;
4292       ctx->inputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->ninputs);
4293    } else {
4294       ctx->ninputs = 0;
4295       ctx->so->inputs_count = 0;
4296    }
4297 
4298    if (uses_store_output(ctx->so)) {
4299       ctx->noutputs = ctx->s->num_outputs * 4;
4300       ctx->outputs =
4301          rzalloc_array(ctx, struct ir3_instruction *, ctx->noutputs);
4302    } else {
4303       ctx->noutputs = 0;
4304    }
4305 
4306    ctx->ir = ir3_create(ctx->compiler, ctx->so);
4307 
4308    /* Create inputs in first block: */
4309    ctx->block = get_block(ctx, nir_start_block(fxn));
4310    ctx->in_block = ctx->block;
4311 
4312    /* for fragment shader, the vcoord input register is used as the
4313     * base for bary.f varying fetch instrs:
4314     *
4315     * TODO defer creating ctx->ij_pixel and corresponding sysvals
4316     * until emit_intrinsic when we know they are actually needed.
4317     * For now, we defer creating ctx->ij_centroid, etc, since we
4318     * only need ij_pixel for "old style" varying inputs (ie.
4319     * tgsi_to_nir)
4320     */
4321    if (ctx->so->type == MESA_SHADER_FRAGMENT) {
4322       ctx->ij[IJ_PERSP_PIXEL] = create_input(ctx, 0x3);
4323    }
4324 
4325    /* Defer add_sysval_input() stuff until after setup_inputs(),
4326     * because sysvals need to be appended after varyings:
4327     */
4328    if (ctx->ij[IJ_PERSP_PIXEL]) {
4329       add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL, 0x3,
4330                                 ctx->ij[IJ_PERSP_PIXEL]);
4331    }
4332 
4333    /* Tessellation shaders always need the primitive ID for indexing the
4334     * BO. Geometry shaders don't always need it, but when they do it has to
4335     * be delivered and unclobbered in the VS. To make things easy, we always
4336     * make room for it in VS/DS.
4337     */
4338    bool has_tess = ctx->so->key.tessellation != IR3_TESS_NONE;
4339    bool has_gs = ctx->so->key.has_gs;
4340    switch (ctx->so->type) {
4341    case MESA_SHADER_VERTEX:
4342       if (has_tess) {
4343          ctx->tcs_header =
4344             create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
4345          ctx->rel_patch_id =
4346             create_sysval_input(ctx, SYSTEM_VALUE_REL_PATCH_ID_IR3, 0x1);
4347          ctx->primitive_id =
4348             create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
4349       } else if (has_gs) {
4350          ctx->gs_header =
4351             create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
4352          ctx->primitive_id =
4353             create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
4354       }
4355       break;
4356    case MESA_SHADER_TESS_CTRL:
4357       ctx->tcs_header =
4358          create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
4359       ctx->rel_patch_id =
4360          create_sysval_input(ctx, SYSTEM_VALUE_REL_PATCH_ID_IR3, 0x1);
4361       break;
4362    case MESA_SHADER_TESS_EVAL:
4363       if (has_gs) {
4364          ctx->gs_header =
4365             create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
4366          ctx->primitive_id =
4367             create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
4368       }
4369       ctx->rel_patch_id =
4370          create_sysval_input(ctx, SYSTEM_VALUE_REL_PATCH_ID_IR3, 0x1);
4371       break;
4372    case MESA_SHADER_GEOMETRY:
4373       ctx->gs_header =
4374          create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
4375       break;
4376    default:
4377       break;
4378    }
4379 
4380    /* Find # of samplers. Just assume that we'll be reading from images; if
4381     * an image is write-only we wouldn't have to count it, but after lowering
4382     * derefs it is too late to compact the indices for that.
4383     */
4384    ctx->so->num_samp =
4385       BITSET_LAST_BIT(ctx->s->info.textures_used) + ctx->s->info.num_images;
4386 
4387    /* Save off clip+cull information. */
4388    ctx->so->clip_mask = MASK(ctx->s->info.clip_distance_array_size);
4389    ctx->so->cull_mask = MASK(ctx->s->info.cull_distance_array_size)
4390                         << ctx->s->info.clip_distance_array_size;

   ctx->so->pvtmem_size = ctx->s->scratch_size;
   ctx->so->shared_size = ctx->s->info.shared_size;

   /* NOTE: need to do something more clever when we support >1 fxn */
   nir_foreach_register (reg, &fxn->registers) {
      ir3_declare_array(ctx, reg);
   }

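   /* With tess_use_shared, the VS passes its outputs to the TCS through
    * shared memory (rather than global memory), so presumably the TCS needs
    * a full barrier up front to be sure the chained VS invocations have
    * finished their stores before any of them are read:
    */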
   if (ctx->so->type == MESA_SHADER_TESS_CTRL &&
       ctx->compiler->tess_use_shared) {
      struct ir3_instruction *barrier = ir3_BAR(ctx->block);
      barrier->flags = IR3_INSTR_SS | IR3_INSTR_SY;
      barrier->barrier_class = IR3_BARRIER_EVERYTHING;
      array_insert(ctx->block, ctx->block->keeps, barrier);
      ctx->so->has_barrier = true;
   }

   /* And emit the body: */
   ctx->impl = fxn;
   emit_function(ctx, fxn);
}

/* Fixup tex sampler state for astc/srgb workaround instructions.  We
 * need to assign the tex state indexes for these after we know the
 * max tex index.
 */
static void
fixup_astc_srgb(struct ir3_context *ctx)
{
   struct ir3_shader_variant *so = ctx->so;
   /* indexed by original tex idx, value is newly assigned alpha sampler
    * state tex idx.  Zero is invalid since there is at least one sampler
    * if we get here.
    */
   unsigned alt_tex_state[16] = {0};
   unsigned tex_idx = ctx->max_texture_index + 1;
   unsigned idx = 0;

   so->astc_srgb.base = tex_idx;

   for (unsigned i = 0; i < ctx->ir->astc_srgb_count; i++) {
      struct ir3_instruction *sam = ctx->ir->astc_srgb[i];

      compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));

      if (alt_tex_state[sam->cat5.tex] == 0) {
         /* assign new alternate/alpha tex state slot: */
         alt_tex_state[sam->cat5.tex] = tex_idx++;
         so->astc_srgb.orig_idx[idx++] = sam->cat5.tex;
         so->astc_srgb.count++;
      }

      sam->cat5.tex = alt_tex_state[sam->cat5.tex];
   }
}

/* Fixup tex sampler state for tg4 workaround instructions.  We
 * need to assign the tex state indexes for these after we know the
 * max tex index.
 */
static void
fixup_tg4(struct ir3_context *ctx)
{
   struct ir3_shader_variant *so = ctx->so;
   /* indexed by original tex idx, value is newly assigned alpha sampler
    * state tex idx.  Zero is invalid since there is at least one sampler
    * if we get here.
    */
   unsigned alt_tex_state[16] = {0};
   unsigned tex_idx = ctx->max_texture_index + so->astc_srgb.count + 1;
   unsigned idx = 0;

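   /* The tg4 workaround states are appended after both the regular textures
    * and any astc/srgb workaround states allocated by fixup_astc_srgb():
    */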
   so->tg4.base = tex_idx;

   for (unsigned i = 0; i < ctx->ir->tg4_count; i++) {
      struct ir3_instruction *sam = ctx->ir->tg4[i];

      compile_assert(ctx, sam->cat5.tex < ARRAY_SIZE(alt_tex_state));

      if (alt_tex_state[sam->cat5.tex] == 0) {
         /* assign new alternate/alpha tex state slot: */
         alt_tex_state[sam->cat5.tex] = tex_idx++;
         so->tg4.orig_idx[idx++] = sam->cat5.tex;
         so->tg4.count++;
      }

      sam->cat5.tex = alt_tex_state[sam->cat5.tex];
   }
}

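/* Only position-type outputs (position, point size, clip/cull distances,
 * viewport) are consumed by the binning (visibility) pass, so everything
 * else can be stripped from the binning variant:
 */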
static bool
output_slot_used_for_binning(gl_varying_slot slot)
{
   return slot == VARYING_SLOT_POS || slot == VARYING_SLOT_PSIZ ||
          slot == VARYING_SLOT_CLIP_DIST0 || slot == VARYING_SLOT_CLIP_DIST1 ||
          slot == VARYING_SLOT_VIEWPORT;
}

static struct ir3_instruction *
find_end(struct ir3 *ir)
{
   foreach_block_rev (block, &ir->block_list) {
      foreach_instr_rev (instr, &block->instr_list) {
         if (instr->opc == OPC_END || instr->opc == OPC_CHMASK)
            return instr;
      }
   }
   unreachable("couldn't find end instruction");
}

static void
fixup_binning_pass(struct ir3_context *ctx, struct ir3_instruction *end)
{
   struct ir3_shader_variant *so = ctx->so;
   unsigned i, j;

   /* first pass, remove unused outputs from the IR level outputs: */
   for (i = 0, j = 0; i < end->srcs_count; i++) {
      unsigned outidx = end->end.outidxs[i];
      unsigned slot = so->outputs[outidx].slot;

      if (output_slot_used_for_binning(slot)) {
         end->srcs[j] = end->srcs[i];
         end->end.outidxs[j] = end->end.outidxs[i];
         j++;
      }
   }
   end->srcs_count = j;

   /* second pass, cleanup the unused slots in ir3_shader_variant::outputs
    * table:
    */
   for (i = 0, j = 0; i < so->outputs_count; i++) {
      unsigned slot = so->outputs[i].slot;

      if (output_slot_used_for_binning(slot)) {
         so->outputs[j] = so->outputs[i];

         /* fixup outidx to point to new output table entry: */
         for (unsigned k = 0; k < end->srcs_count; k++) {
            if (end->end.outidxs[k] == i) {
               end->end.outidxs[k] = j;
               break;
            }
         }

         j++;
      }
   }
   so->outputs_count = j;
}

static void
collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
{
   unsigned idx = 0;

   /* Collect sampling instructions eligible for pre-dispatch. */
   foreach_block (block, &ir->block_list) {
      foreach_instr_safe (instr, &block->instr_list) {
         if (instr->opc == OPC_META_TEX_PREFETCH) {
            assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch));
            struct ir3_sampler_prefetch *fetch =
               &ctx->so->sampler_prefetch[idx];
            idx++;

            if (instr->flags & IR3_INSTR_B) {
               fetch->cmd = IR3_SAMPLER_BINDLESS_PREFETCH_CMD;
               /* In bindless mode, the index is actually the base */
               fetch->tex_id = instr->prefetch.tex_base;
               fetch->samp_id = instr->prefetch.samp_base;
               fetch->tex_bindless_id = instr->prefetch.tex;
               fetch->samp_bindless_id = instr->prefetch.samp;
            } else {
               fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
               fetch->tex_id = instr->prefetch.tex;
               fetch->samp_id = instr->prefetch.samp;
            }
            fetch->wrmask = instr->dsts[0]->wrmask;
            fetch->dst = instr->dsts[0]->num;
            fetch->src = instr->prefetch.input_offset;

            /* These are the limits on a5xx/a6xx, we might need to
             * revisit if SP_FS_PREFETCH[n] changes on later gens:
             */
            assert(fetch->dst <= 0x3f);
            assert(fetch->tex_id <= 0x1f);
            assert(fetch->samp_id <= 0xf);

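            /* The prefetch source covers a pair of consecutive scalar
             * inputs (its texture coordinate), hence the "+ 2" when
             * growing total_in:
             */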
            ctx->so->total_in =
               MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2);

            fetch->half_precision = !!(instr->dsts[0]->flags & IR3_REG_HALF);

            /* Remove the prefetch placeholder instruction: */
            list_delinit(&instr->node);
         }
      }
   }
}

int
ir3_compile_shader_nir(struct ir3_compiler *compiler,
                       struct ir3_shader_variant *so)
{
   struct ir3_context *ctx;
   struct ir3 *ir;
   int ret = 0, max_bary;
   bool progress;

   assert(!so->ir);

   ctx = ir3_context_init(compiler, so);
   if (!ctx) {
      DBG("INIT failed!");
      ret = -1;
      goto out;
   }

   emit_instructions(ctx);

   if (ctx->error) {
      DBG("EMIT failed!");
      ret = -1;
      goto out;
   }

   ir = so->ir = ctx->ir;

   if (gl_shader_stage_is_compute(so->type)) {
      so->local_size[0] = ctx->s->info.workgroup_size[0];
      so->local_size[1] = ctx->s->info.workgroup_size[1];
      so->local_size[2] = ctx->s->info.workgroup_size[2];
      so->local_size_variable = ctx->s->info.workgroup_size_variable;
   }

   /* Vertex shaders in a tessellation or geometry pipeline treat END as a
    * NOP and have an epilogue that writes the VS outputs to local storage, to
    * be read by the HS.  The epilogue then resets the execution mask (chmask)
    * and chains to the next shader (chsh). There are also a few output values
    * which we must send to the next stage via registers, and in order for
    * both stages to agree on the register used we must force these to be in
    * specific registers.
    */
   if ((so->type == MESA_SHADER_VERTEX &&
        (so->key.has_gs || so->key.tessellation)) ||
       (so->type == MESA_SHADER_TESS_EVAL && so->key.has_gs)) {
      struct ir3_instruction *outputs[3];
      unsigned outidxs[3];
      unsigned regids[3];
      unsigned outputs_count = 0;

      if (ctx->primitive_id) {
         unsigned n = so->outputs_count++;
         so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;

         struct ir3_instruction *out = ir3_collect(ctx->block, ctx->primitive_id);
         outputs[outputs_count] = out;
         outidxs[outputs_count] = n;
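         /* The regid has to line up with where the next stage expects to
          * find it: regid(0, 2) when rel_patch_id occupies regid(0, 1)
          * (the VS->TCS case), and regid(0, 1) otherwise:
          */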
         if (so->type == MESA_SHADER_VERTEX && ctx->rel_patch_id)
            regids[outputs_count] = regid(0, 2);
         else
            regids[outputs_count] = regid(0, 1);
         outputs_count++;
      }

      if (so->type == MESA_SHADER_VERTEX && ctx->rel_patch_id) {
         unsigned n = so->outputs_count++;
         so->outputs[n].slot = VARYING_SLOT_REL_PATCH_ID_IR3;
         struct ir3_instruction *out = ir3_collect(ctx->block, ctx->rel_patch_id);
         outputs[outputs_count] = out;
         outidxs[outputs_count] = n;
         regids[outputs_count] = regid(0, 1);
         outputs_count++;
      }

      if (ctx->gs_header) {
         unsigned n = so->outputs_count++;
         so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
         struct ir3_instruction *out = ir3_collect(ctx->block, ctx->gs_header);
         outputs[outputs_count] = out;
         outidxs[outputs_count] = n;
         regids[outputs_count] = regid(0, 0);
         outputs_count++;
      }

      if (ctx->tcs_header) {
         unsigned n = so->outputs_count++;
         so->outputs[n].slot = VARYING_SLOT_TCS_HEADER_IR3;
         struct ir3_instruction *out = ir3_collect(ctx->block, ctx->tcs_header);
         outputs[outputs_count] = out;
         outidxs[outputs_count] = n;
         regids[outputs_count] = regid(0, 0);
         outputs_count++;
      }

      struct ir3_instruction *chmask =
         ir3_instr_create(ctx->block, OPC_CHMASK, 0, outputs_count);
      chmask->barrier_class = IR3_BARRIER_EVERYTHING;
      chmask->barrier_conflict = IR3_BARRIER_EVERYTHING;

      for (unsigned i = 0; i < outputs_count; i++)
         __ssa_src(chmask, outputs[i], 0)->num = regids[i];

      chmask->end.outidxs = ralloc_array(chmask, unsigned, outputs_count);
      memcpy(chmask->end.outidxs, outidxs, sizeof(unsigned) * outputs_count);

      array_insert(ctx->block, ctx->block->keeps, chmask);

      struct ir3_instruction *chsh = ir3_CHSH(ctx->block);
      chsh->barrier_class = IR3_BARRIER_EVERYTHING;
      chsh->barrier_conflict = IR3_BARRIER_EVERYTHING;
   } else {
      assert((ctx->noutputs % 4) == 0);
      unsigned outidxs[ctx->noutputs / 4];
      struct ir3_instruction *outputs[ctx->noutputs / 4];
      unsigned outputs_count = 0;

      struct ir3_block *b = ctx->block;
      /* Insert these collects in the block before the end-block if
       * possible, so that any moves they generate can be shuffled around to
       * reduce nops:
       */
      if (ctx->block->predecessors_count == 1)
         b = ctx->block->predecessors[0];

      /* Setup IR level outputs, which are "collects" that gather
       * the scalar components of outputs.
       */
      for (unsigned i = 0; i < ctx->noutputs; i += 4) {
         unsigned ncomp = 0;
         /* figure out the # of components written:
          *
          * TODO do we need to handle holes, i.e. if the .x and .z
          * components are written, but the .y component is not?
          */
         for (unsigned j = 0; j < 4; j++) {
            if (!ctx->outputs[i + j])
               break;
            ncomp++;
         }

         /* Note that in some stages, like TCS, store_output is
          * lowered to memory writes, so no components of the output
          * are "written" from the PoV of traditional store-
          * output instructions:
          */
         if (!ncomp)
            continue;

         struct ir3_instruction *out =
            ir3_create_collect(b, &ctx->outputs[i], ncomp);

         int outidx = i / 4;
         assert(outidx < so->outputs_count);

         outidxs[outputs_count] = outidx;
         outputs[outputs_count] = out;
         outputs_count++;
      }

      /* For a6xx+, the binning and draw pass VS use the same VBO state, so
       * we need to make sure not to remove any inputs that are used by
       * the nonbinning VS.
       */
      if (ctx->compiler->gen >= 6 && so->binning_pass &&
          so->type == MESA_SHADER_VERTEX) {
         for (int i = 0; i < ctx->ninputs; i++) {
            struct ir3_instruction *in = ctx->inputs[i];

            if (!in)
               continue;

            unsigned n = i / 4;
            unsigned c = i % 4;

            debug_assert(n < so->nonbinning->inputs_count);

            if (so->nonbinning->inputs[n].sysval)
               continue;

            /* be sure to keep inputs, even if only used in VS */
            if (so->nonbinning->inputs[n].compmask & (1 << c))
               array_insert(in->block, in->block->keeps, in);
         }
      }

      struct ir3_instruction *end =
         ir3_instr_create(ctx->block, OPC_END, 0, outputs_count);

      for (unsigned i = 0; i < outputs_count; i++) {
         __ssa_src(end, outputs[i], 0);
      }

      end->end.outidxs = ralloc_array(end, unsigned, outputs_count);
      memcpy(end->end.outidxs, outidxs, sizeof(unsigned) * outputs_count);

      array_insert(ctx->block, ctx->block->keeps, end);

      /* at this point, for binning pass, throw away unneeded outputs: */
      if (so->binning_pass && (ctx->compiler->gen < 6))
         fixup_binning_pass(ctx, end);
   }

   ir3_debug_print(ir, "AFTER: nir->ir3");
   ir3_validate(ir);

   IR3_PASS(ir, ir3_remove_unreachable);

   IR3_PASS(ir, ir3_array_to_ssa);

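   /* Run the main optimization passes to a fixed point, since each one can
    * expose new opportunities for the others:
    */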
   do {
      progress = false;

      /* the folding doesn't seem to work reliably on a4xx */
      if (ctx->compiler->gen != 4)
         progress |= IR3_PASS(ir, ir3_cf);
      progress |= IR3_PASS(ir, ir3_cp, so);
      progress |= IR3_PASS(ir, ir3_cse);
      progress |= IR3_PASS(ir, ir3_dce, so);
   } while (progress);

   /* At this point, for the binning pass, throw away unneeded outputs.
    * Note that for a6xx and later, we do this after ir3_cp to ensure
    * that the uniform/constant layout for BS and VS matches, so that
    * we can re-use the same VS_CONST state group.
    */
   if (so->binning_pass && (ctx->compiler->gen >= 6)) {
      fixup_binning_pass(ctx, find_end(ctx->so->ir));
      /* cleanup the result of removing unneeded outputs: */
      while (IR3_PASS(ir, ir3_dce, so)) {
      }
   }

   IR3_PASS(ir, ir3_sched_add_deps);

   /* At this point, all the dead code should be long gone: */
   assert(!IR3_PASS(ir, ir3_dce, so));

   ret = ir3_sched(ir);
   if (ret) {
      DBG("SCHED failed!");
      goto out;
   }

   ir3_debug_print(ir, "AFTER: ir3_sched");

   if (IR3_PASS(ir, ir3_cp_postsched)) {
      /* cleanup the result of removing unneeded mov's: */
      while (IR3_PASS(ir, ir3_dce, so)) {
      }
   }

   /* Pre-assign VS inputs on a6xx+ binning pass shader, to align
    * with draw pass VS, so binning and draw pass can both use the
    * same VBO state.
    *
    * Note that VS inputs are expected to be full precision.
    */
   bool pre_assign_inputs = (ir->compiler->gen >= 6) &&
                            (ir->type == MESA_SHADER_VERTEX) &&
                            so->binning_pass;

   if (pre_assign_inputs) {
      foreach_input (in, ir) {
         assert(in->opc == OPC_META_INPUT);
         unsigned inidx = in->input.inidx;

         in->dsts[0]->num = so->nonbinning->inputs[inidx].regid;
      }
   } else if (ctx->tcs_header) {
      /* We need to have these values in the same registers between VS and TCS
       * since the VS chains to TCS and doesn't get the sysvals redelivered.
       */

      ctx->tcs_header->dsts[0]->num = regid(0, 0);
      ctx->rel_patch_id->dsts[0]->num = regid(0, 1);
      if (ctx->primitive_id)
         ctx->primitive_id->dsts[0]->num = regid(0, 2);
   } else if (ctx->gs_header) {
      /* We need to have these values in the same registers between producer
       * (VS or DS) and GS since the producer chains to GS and doesn't get
       * the sysvals redelivered.
       */

      ctx->gs_header->dsts[0]->num = regid(0, 0);
      if (ctx->primitive_id)
         ctx->primitive_id->dsts[0]->num = regid(0, 1);
   } else if (so->num_sampler_prefetch) {
      assert(so->type == MESA_SHADER_FRAGMENT);
      int idx = 0;

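      /* The pre-dispatch sampler prefetch presumably needs to find ij_pixel
       * at a fixed location, so pin the barycentric input components to the
       * start of the register file (r0.x/r0.y):
       */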
      foreach_input (instr, ir) {
         if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL)
            continue;

         assert(idx < 2);
         instr->dsts[0]->num = idx;
         idx++;
      }
   }

   ret = ir3_ra(so);

   if (ret) {
      mesa_loge("ir3_ra() failed!");
      goto out;
   }

   IR3_PASS(ir, ir3_postsched, so);

   IR3_PASS(ir, ir3_lower_subgroups);

   if (so->type == MESA_SHADER_FRAGMENT)
      pack_inlocs(ctx);

   /*
    * Fixup inputs/outputs to point to the actual registers assigned:
    *
    * 1) initialize to r63.x (invalid/unused)
    * 2) iterate IR level inputs/outputs and update the variant's
    *    inputs/outputs table based on the assigned registers for
    *    the remaining inputs/outputs.
    */

   for (unsigned i = 0; i < so->inputs_count; i++)
      so->inputs[i].regid = INVALID_REG;
   for (unsigned i = 0; i < so->outputs_count; i++)
      so->outputs[i].regid = INVALID_REG;

   struct ir3_instruction *end = find_end(so->ir);

   for (unsigned i = 0; i < end->srcs_count; i++) {
      unsigned outidx = end->end.outidxs[i];
      struct ir3_register *reg = end->srcs[i];

      so->outputs[outidx].regid = reg->num;
      so->outputs[outidx].half = !!(reg->flags & IR3_REG_HALF);
   }

   foreach_input (in, ir) {
      assert(in->opc == OPC_META_INPUT);
      unsigned inidx = in->input.inidx;

      if (pre_assign_inputs && !so->inputs[inidx].sysval) {
         if (VALIDREG(so->nonbinning->inputs[inidx].regid)) {
            compile_assert(
               ctx, in->dsts[0]->num == so->nonbinning->inputs[inidx].regid);
            compile_assert(ctx, !!(in->dsts[0]->flags & IR3_REG_HALF) ==
                                   so->nonbinning->inputs[inidx].half);
         }
         so->inputs[inidx].regid = so->nonbinning->inputs[inidx].regid;
         so->inputs[inidx].half = so->nonbinning->inputs[inidx].half;
      } else {
         so->inputs[inidx].regid = in->dsts[0]->num;
         so->inputs[inidx].half = !!(in->dsts[0]->flags & IR3_REG_HALF);
      }
   }

   if (ctx->astc_srgb)
      fixup_astc_srgb(ctx);

   if (ctx->compiler->gen == 4 && ctx->s->info.uses_texture_gather)
      fixup_tg4(ctx);

   /* We need to run legalize after the "bary.f" offsets (inloc) have been
    * assigned (for fragment shaders).
    */
   IR3_PASS(ir, ir3_legalize, so, &max_bary);

   /* Set (ss)(sy) on first TCS and GEOMETRY instructions, since we don't
    * know what we might have to wait on when coming in from VS chsh.
    */
   if (so->type == MESA_SHADER_TESS_CTRL || so->type == MESA_SHADER_GEOMETRY) {
      foreach_block (block, &ir->block_list) {
         foreach_instr (instr, &block->instr_list) {
            instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
            break;
         }
      }
   }

   so->branchstack = ctx->max_stack;

   /* Note that max_bary counts inputs that are not bary.f'd for FS: */
   if (so->type == MESA_SHADER_FRAGMENT)
      so->total_in = max_bary + 1;

   /* Collect sampling instructions eligible for pre-dispatch. */
   collect_tex_prefetches(ctx, ir);

   if (so->type == MESA_SHADER_FRAGMENT &&
       ctx->s->info.fs.needs_quad_helper_invocations)
      so->need_pixlod = true;

out:
   if (ret) {
      if (so->ir)
         ir3_destroy(so->ir);
      so->ir = NULL;
   }
   ir3_context_free(ctx);

   return ret;
}