/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"
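
/* The V3D41_TMU_CONFIG_PARAMETER_*_pack() helpers and *_unpacked structs used
 * below come from the generated packet header included above. They build the
 * 32-bit P0/P1/P2 configuration words that are handed to the TMU through
 * WRTMUC uniforms.
 */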

static inline void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);
}

static inline void
vir_TMU_WRITE_or_count(struct v3d_compile *c,
                       enum v3d_qpu_waddr waddr,
                       struct qreg val,
                       uint32_t *tmu_writes)
{
        if (tmu_writes)
                (*tmu_writes)++;
        else
                vir_TMU_WRITE(c, waddr, val);
}
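
/* Emits a NOP carrying the WRTMUC signal; the next TMU configuration word is
 * provided through the uniform stream (either a constant we packed here or a
 * value filled in by the driver).
 */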

static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}
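
/* Default parameter values: when the packed P1/P2 words match these we can
 * skip emitting the corresponding configuration write altogether.
 */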

static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

/**
 * If 'tmu_writes' is not NULL, this only counts the required register writes;
 * otherwise it emits the actual register writes.
 *
 * Note that emitting the register writes for the current TMU operation may
 * trigger a TMU flush, since any of the inputs required for those writes may
 * itself be the result of a pending TMU operation. If that happens, it must
 * not happen in the middle of the register writes for the current TMU
 * operation, which is why we always call ntq_get_src() even if we are only
 * interested in the register write count.
 */
static void
handle_tex_src(struct v3d_compile *c,
               nir_tex_instr *instr,
               unsigned src_idx,
               unsigned non_array_components,
               struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
               struct qreg *s_out,
               unsigned *tmu_writes)
{
        /* Either we are calling this just to count required TMU writes, or we
         * are calling this to emit the actual TMU writes.
         */
        assert(tmu_writes || (s_out && p2_unpacked));

        struct qreg s;
        switch (instr->src[src_idx].src_type) {
        case nir_tex_src_coord:
                /* S triggers the lookup, so save it for the end. */
                s = ntq_get_src(c, instr->src[src_idx].src, 0);
                if (tmu_writes)
                        (*tmu_writes)++;
                else
                        *s_out = s;

                if (non_array_components > 1) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src,
                                               tmu_writes);
                }

                if (non_array_components > 2) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 2);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src,
                                               tmu_writes);
                }

                if (instr->is_array) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src,
                                            instr->coord_components - 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src,
                                               tmu_writes);
                }
                break;

        case nir_tex_src_bias: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);
                break;
        }

        case nir_tex_src_lod: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);
                if (!tmu_writes) {
                        /* With texel fetch automatic LOD is already disabled,
                         * and disable_autolod must not be enabled. For
                         * non-cubes we can use the TMUSLOD register, which
                         * implicitly sets disable_autolod.
                         */
                        assert(p2_unpacked);
                        if (instr->op != nir_texop_txf &&
                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                p2_unpacked->disable_autolod = true;
                        }
                }
                break;
        }

        case nir_tex_src_comparator: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUDREF, src, tmu_writes);
                break;
        }

        case nir_tex_src_offset: {
                bool is_const_offset = nir_src_is_const(instr->src[src_idx].src);
                if (is_const_offset) {
                        if (!tmu_writes) {
                                p2_unpacked->offset_s =
                                        nir_src_comp_as_int(instr->src[src_idx].src, 0);
                                if (non_array_components >= 2)
                                        p2_unpacked->offset_t =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked->offset_r =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 2);
                        }
                } else {
                        struct qreg src_0 =
                                ntq_get_src(c, instr->src[src_idx].src, 0);
                        struct qreg src_1 =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
                        if (!tmu_writes) {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, src_0, mask);
                                y = vir_AND(c, src_1, mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y, vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF, offset);
                        } else {
                                (*tmu_writes)++;
                        }
                }
                break;
        }

        default:
                unreachable("unknown texture source");
        }
}
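
/* Walks all NIR texture sources for 'instr', either counting or emitting the
 * required TMU register writes. non_array_components is the number of
 * coordinate components excluding the array index; for nir_texop_lod the
 * coordinate doesn't include an array index, so all of its components count.
 */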

static void
vir_tex_handle_srcs(struct v3d_compile *c,
                    nir_tex_instr *instr,
                    struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
                    struct qreg *s,
                    unsigned *tmu_writes)
{
        unsigned non_array_components = instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                handle_tex_src(c, instr, i, non_array_components,
                               p2_unpacked, s, tmu_writes);
        }
}
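
/* Dry run of the source handling that only counts the TMU register writes
 * the texture operation will need, so we can decide on thread count and FIFO
 * flushing before emitting anything.
 */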

static unsigned
get_required_tex_tmu_writes(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned tmu_writes = 0;
        vir_tex_handle_srcs(c, instr, NULL, NULL, &tmu_writes);
        return tmu_writes;
}
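
/* Emits a NIR texture instruction as a TMU lookup: packs the P0/P1/P2
 * configuration words, emits the WRTMUC writes for whichever parameters are
 * needed, writes the texture sources to the TMU registers and finishes with
 * the retiring S write that triggers the lookup.
 */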

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        unsigned texture_idx = instr->texture_index;
        unsigned sampler_idx = instr->sampler_index;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        p0_unpacked.return_words_of_texture_data =
                instr->dest.is_ssa ?
                nir_ssa_def_components_read(&instr->dest.ssa) :
                (1 << instr->dest.reg.reg->num_components) - 1;
        assert(p0_unpacked.return_words_of_texture_data != 0);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,
                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,
                .coefficient_mode = instr->op == nir_texop_txd,
                .disable_autolod = instr->op == nir_texop_tg4
        };

        const unsigned tmu_writes = get_required_tex_tmu_writes(c, instr);

        /* The input FIFO has 16 slots across all threads, so if we require
         * more than that we need to lower the thread count.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow the TMU FIFOs, we
         * need to flush any outstanding TMU operations first.
         */
        const unsigned dest_components =
                util_bitcount(p0_unpacked.return_words_of_texture_data);
        if (ntq_tmu_fifo_overflow(c, dest_components))
                ntq_flush_tmu(c);

        /* Process the tex sources, emitting the corresponding TMU writes. */
        struct qreg s = { };
        vir_tex_handle_srcs(c, instr, &p2_unpacked, &s, NULL);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2) as right now it is the only
         * V4.2-specific feature over V4.1 that we use.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load the texture_idx number into the high bits of the texture
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= texture_idx << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by itself, we
         * still need to add the sampler configuration parameter if the
         * output is 32-bit.
         */
        bool output_type_32_bit =
                c->key->sampler[sampler_idx].return_size == 32 &&
                !instr->is_shadow;

        /* p1 is optional, but we can skip it only if p2 can be skipped too. */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default,
                        sizeof(p2_unpacked)) != 0);

        /* To handle the cases where we can't just use p1_unpacked_default. */
        bool non_default_p1_config = nir_tex_instr_need_sampler(instr) ||
                output_type_32_bit;

        if (non_default_p1_config) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                if (nir_tex_instr_need_sampler(instr)) {
                        /* Load the sampler_idx number into the high bits of
                         * the sampler address field, which will be used by
                         * the driver to decide which sampler to put in the
                         * actual address field.
                         */
                        p1_packed |= sampler_idx << 24;

                        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
                } else {
                        /* In this case we don't need to merge in any sampler
                         * state from the API and can just use our packed
                         * bits.
                         */
                        vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
                }
        } else if (needs_p2_config) {
                /* Configuration parameters need to be set up in order, so if
                 * P2 is needed we have to set up P1 too, even if the texture
                 * operation doesn't need any sampler info. In that case we
                 * can set up default info and avoid asking the driver for
                 * the sampler state address.
                 */
                uint32_t p1_packed_default;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed_default,
                                                  &p1_unpacked_default);
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit retiring TMU write */
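        /* The S write selects the flavor of the lookup: TMUSF for texel
         * fetch (txf), TMUSCM for cube maps, TMUSLOD for explicit-LOD
         * lookups (which implicitly disables automatic LOD) and plain TMUS
         * otherwise.
         */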
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s);
        } else if (instr->op == nir_texop_txl) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s);
        }

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}
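
/* Maps a NIR image intrinsic to the TMU write operation encoded in P2. For
 * atomic adds, v3d_get_op_for_atomic_add() may return the inc/dec encodings
 * when the operand allows it.
 */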

static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}

/**
 * If 'tmu_writes' is not NULL, this only counts the required register writes;
 * otherwise it emits the actual register writes.
 *
 * Note that emitting the register writes for the current TMU operation may
 * trigger a TMU flush, since any of the inputs required for those writes may
 * itself be the result of a pending TMU operation. If that happens, it must
 * not happen in the middle of the register writes for the current TMU
 * operation, which is why we always call ntq_get_src() even if we are only
 * interested in the register write count.
 */
static void
vir_image_emit_register_writes(struct v3d_compile *c,
                               nir_intrinsic_instr *instr,
                               bool atomic_add_replaced,
                               uint32_t *tmu_writes)
{
        if (tmu_writes)
                *tmu_writes = 0;
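
        /* NIR image intrinsic sources: src[0] is the image index, src[1] the
         * coordinate, src[2] the sample index, src[3] the store/atomic data
         * and src[4] the second comp_swap operand.
         */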

        bool is_1d = false;
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_CUBE: {
                struct qreg src = ntq_get_src(c, instr->src[1], 1);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src, tmu_writes);
                break;
        }
        case GLSL_SAMPLER_DIM_3D: {
                struct qreg src_1_1 = ntq_get_src(c, instr->src[1], 1);
                struct qreg src_1_2 = ntq_get_src(c, instr->src[1], 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src_1_1, tmu_writes);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src_1_2, tmu_writes);
                break;
        }
        default:
                unreachable("bad image sampler dim");
        }

        /* In order to fetch on a cube map, we need to interpret it as
         * 2D arrays, where the third coord would be the face index.
         */
        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
            nir_intrinsic_image_array(instr)) {
                struct qreg src = ntq_get_src(c, instr->src[1], is_1d ? 1 : 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src, tmu_writes);
        }

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        struct qreg src_3_i = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_3_i,
                                               tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
                        struct qreg src_4_0 = ntq_get_src(c, instr->src[4], 0);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_4_0,
                                               tmu_writes);
                }
        }
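
        /* For stores and atomics in non-uniform control flow, predicate the
         * retiring TMUSF write on the execute mask so inactive channels don't
         * perform the write: push Z from c->execute into the flags and make
         * the last emitted instruction (the TMUSF write) conditional on it.
         */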

        struct qreg src_1_0 = ntq_get_src(c, instr->src[1], 0);
        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(c, vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUSF, src_1_0, tmu_writes);

        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }
}

static unsigned
get_required_image_tmu_writes(struct v3d_compile *c,
                              nir_intrinsic_instr *instr,
                              bool atomic_add_replaced)
{
        unsigned tmu_writes;
        vir_image_emit_register_writes(c, instr, atomic_add_replaced,
                                       &tmu_writes);
        return tmu_writes;
}
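
/* Emits a NIR image load/store/atomic as a TMU general memory access: packs
 * the P0/P1/P2 configuration words, emits the WRTMUC writes that differ from
 * the defaults, writes the coordinate and data registers and retires the
 * operation with the TMUSF write.
 */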

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
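        /* With a 16-bit output type, two channels are packed per return word. */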
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace the atomic_add with an inc/dec, then we
         * need to do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        /* Load the unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;

        const uint32_t tmu_writes =
                get_required_image_tmu_writes(c, instr, atomic_add_replaced);

        /* The input FIFO has 16 slots across all threads, so if we require
         * more than that we need to lower the thread count.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow the TMU FIFOs, we
         * need to flush any outstanding TMU operations first.
         */
        if (ntq_tmu_fifo_overflow(c, instr_return_channels))
                ntq_flush_tmu(c);

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}