/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves into NIR certain offset computations for different I/O
 * ops that were previously implemented in the IR3 backend compiler, so
 * that NIR gets a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: a new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the original byte-offset source divided by 4.
 */
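
/* At the NIR level the pass has roughly this effect (a sketch, not verbatim
 * nir_print output):
 *
 *    vec1 32 ssa_2 = intrinsic load_ssbo (ssa_0, ssa_1) ...
 *
 * becomes
 *
 *    vec1 32 ssa_3 = ushr ssa_1, 2
 *    vec1 32 ssa_2 = intrinsic load_ssbo_ir3 (ssa_0, ssa_1, ssa_3) ...
 *
 * where ssa_1 is the byte offset and ssa_3 is the derived dword offset.
 */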

/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not an SSBO intrinsic, or is not handled by this pass,
 * -1 is returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   debug_assert(offset_src_idx);

   *offset_src_idx = 1;
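
   /* The offset is src[1] for every intrinsic handled here except
    * store_ssbo, where src[0] holds the value to be written and the
    * offset moves to src[2].
    */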
   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic_add:
      return nir_intrinsic_ssbo_atomic_add_ir3;
   case nir_intrinsic_ssbo_atomic_imin:
      return nir_intrinsic_ssbo_atomic_imin_ir3;
   case nir_intrinsic_ssbo_atomic_umin:
      return nir_intrinsic_ssbo_atomic_umin_ir3;
   case nir_intrinsic_ssbo_atomic_imax:
      return nir_intrinsic_ssbo_atomic_imax_ir3;
   case nir_intrinsic_ssbo_atomic_umax:
      return nir_intrinsic_ssbo_atomic_umax_ir3;
   case nir_intrinsic_ssbo_atomic_and:
      return nir_intrinsic_ssbo_atomic_and_ir3;
   case nir_intrinsic_ssbo_atomic_or:
      return nir_intrinsic_ssbo_atomic_or_ir3;
   case nir_intrinsic_ssbo_atomic_xor:
      return nir_intrinsic_ssbo_atomic_xor_ir3;
   case nir_intrinsic_ssbo_atomic_exchange:
      return nir_intrinsic_ssbo_atomic_exchange_ir3;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
   default:
      break;
   }

   return -1;
}

static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   debug_assert(alu_instr->src[1].src.is_ssa);
   nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a constant value, so we can check its
    * range statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too, to be on
    * the safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
   else
      shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));

   return shift_ssa;
}
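
/* Example (hypothetical values): folding an extra '>> 2' (shift == -2) into
 * an existing 'x >> 3' yields 'x >> 5', while folding it into 'x << 4'
 * yields 'x << 2'. If the combined shift would flip direction or leave the
 * [-31, 31] range, NULL is returned and the caller must emit the extra
 * shift itself.
 */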
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
                                int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_ssa_def *shift_ssa;
   nir_ssa_def *new_offset = NULL;

   /* The first src could be something like ssa_18.x, but we only want
    * the single component. Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation.
    */
   nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}

/* isam doesn't have an "untyped" field, so it can only load 1 component at a
 * time because our storage buffer descriptors use a 1-component format.
 * Therefore we need to scalarize any loads that would use isam.
 */
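/* For instance (a sketch, assuming a vec4 32-bit load), a single
 *
 *    vec4 32 ssa_2 = intrinsic load_ssbo_ir3 (ssa_0, ssa_1, ssa_3) ...
 *
 * is rewritten as four single-component load_ssbo_ir3 with byte offsets
 * ssa_1 + 0/4/8/12 and dword offsets ssa_3 + 0/1/2/3, recombined into a
 * vec4.
 */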
static void
scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
{
   struct nir_ssa_def *results[NIR_MAX_VEC_COMPONENTS];

   nir_ssa_def *descriptor = intrinsic->src[0].ssa;
   nir_ssa_def *offset = intrinsic->src[1].ssa;
   nir_ssa_def *new_offset = intrinsic->src[2].ssa;
   unsigned comp_size = intrinsic->dest.ssa.bit_size / 8;
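
   /* Emit one single-component load per component, stepping the byte offset
    * by the component size and the dword (or 16-bit word) offset by one.
    */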
   for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) {
      results[i] =
         nir_load_ssbo_ir3(b, 1, intrinsic->dest.ssa.bit_size, descriptor,
                           nir_iadd(b, offset, nir_imm_int(b, i * comp_size)),
                           nir_iadd(b, new_offset, nir_imm_int(b, i)),
                           .access = nir_intrinsic_access(intrinsic),
                           .align_mul = nir_intrinsic_align_mul(intrinsic),
                           .align_offset = nir_intrinsic_align_offset(intrinsic));
   }

   nir_ssa_def *result = nir_vec(b, results, intrinsic->dest.ssa.num_components);

   nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, result);

   nir_instr_remove(&intrinsic->instr);
}

static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   int shift = 2;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_ssa_def *new_dest = NULL;

   /* For 16-bit SSBO access, the offset is in 16-bit words instead of
    * dwords.
    */
   if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
      shift = 1;

   /* Here we create a new intrinsic and copy over all contents from the old
    * one.
    */

   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* 'offset_src_idx' holds the index of the source that represents the
    * offset.
    */
   new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   debug_assert(intrinsic->src[offset_src_idx].is_ssa);
   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction with an adjusted (merged) shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
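   /* E.g. (hypothetical offset): if the offset is defined as 'x << 4', the
    * division by 4 folds into it as 'x << 2', and no extra ushr is needed
    * below.
    */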
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      debug_assert(intrinsic->dest.is_ssa);
      nir_ssa_def *dest = &intrinsic->dest.ssa;
      nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                        dest->num_components, dest->bit_size, NULL);
      new_dest = &new_intrinsic->dest.ssa;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

   new_intrinsic->num_components = intrinsic->num_components;

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = nir_ushr(b, offset, nir_imm_int(b, shift));

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic with the result of
    * the offset divided by 4.
    */
   nir_instr_rewrite_src(&new_intrinsic->instr, target_src,
                         nir_src_for_ssa(offset));

   if (has_dest) {
      /* Replace all uses of the original destination with that of the
       * new intrinsic.
       */
      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

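   /* Loads that would use isam (reorderable accesses to bindless SSBOs) can
    * only fetch one component per access, so split wide loads now (see
    * scalarize_load above).
    */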
   if (new_intrinsic->intrinsic == nir_intrinsic_load_ssbo_ir3 &&
       (nir_intrinsic_access(new_intrinsic) & ACCESS_CAN_REORDER) &&
       ir3_bindless_resource(new_intrinsic->src[0]) &&
       new_intrinsic->num_components > 1)
      scalarize_load(new_intrinsic, b);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic =
         get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic, &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned)ir3_intrinsic,
                                           offset_src_idx);
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe (block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx);
   }

   if (progress) {
      nir_metadata_preserve(impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }

   return progress;
}

bool
ir3_nir_lower_io_offsets(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function (function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl);
   }

   return progress;
}