/*
 * Copyright (C) 2021 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "compiler/nir/nir_builder.h"

/* Divergent attribute access is undefined behaviour. To avoid divergence,
 * lower to an if-chain like:
 *
 *   value = 0;
 *   if (lane == 0)
 *      value = ld()
 *   else if (lane == 1)
 *      value = ld()
 *   ...
 *   else if (lane == MAX_LANE)
 *      value = ld()
 */
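
/* MAX_LANE is (warp width - 1); the driver supplies the width as the
 * `lanes` argument to the pass entry point at the bottom of this file. */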

static bool
bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
{
        if (instr->type != nir_instr_type_intrinsic)
                return false;

        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
        gl_shader_stage stage = b->shader->info.stage;
        nir_src *offset;

        /* Not all indirect access needs this workaround */
        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
        case nir_intrinsic_load_interpolated_input:
                /* Attributes and varyings */
                offset = nir_get_io_offset_src(intr);
                break;

        case nir_intrinsic_store_output:
                /* Varying stores only; in a fragment shader, store_output
                 * writes a render target rather than a varying */
                if (stage == MESA_SHADER_FRAGMENT)
                        return false;

                offset = nir_get_io_offset_src(intr);
                break;

        case nir_intrinsic_image_atomic_add:
        case nir_intrinsic_image_atomic_imin:
        case nir_intrinsic_image_atomic_umin:
        case nir_intrinsic_image_atomic_imax:
        case nir_intrinsic_image_atomic_umax:
        case nir_intrinsic_image_atomic_and:
        case nir_intrinsic_image_atomic_or:
        case nir_intrinsic_image_atomic_xor:
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                /* Any image access; src[0] is the image index, which may
                 * itself be divergent */
                offset = &intr->src[0];
                break;

        default:
                return false;
        }

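        /* nir_src_is_divergent reports the result of a prior
         * nir_divergence_analysis run, so that analysis must be up to date
         * here; a uniform offset is already safe and needs no lowering. */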
        if (!nir_src_is_divergent(*offset))
                return false;

        /* This indirect does need it */

        b->cursor = nir_before_instr(instr);
        /* This thread's index within the warp */
        nir_ssa_def *lane = nir_load_subgroup_invocation(b);
        unsigned *lanes = data;

        /* Write zero in a funny way to bypass lower_load_const_to_scalar */
        bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
        unsigned size = has_dest ? nir_dest_bit_size(intr->dest) : 32;
        nir_ssa_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
        nir_ssa_def *zeroes[4] = { zero, zero, zero, zero };
        nir_ssa_def *res = has_dest ?
                nir_vec(b, zeroes, nir_dest_num_components(intr->dest)) : NULL;

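        /* Clone the access once per lane, guarded by (lane == i). Each
         * clone executes with exactly one lane active, so its offset is
         * trivially uniform within the branch. nir_if_phi then merges the
         * clone's value with the running result (seeded by the zero vector
         * above) for the lanes that took the other path. */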
        for (unsigned i = 0; i < (*lanes); ++i) {
                nir_push_if(b, nir_ieq_imm(b, lane, i));

                nir_instr *c = nir_instr_clone(b->shader, instr);
                nir_intrinsic_instr *c_intr = nir_instr_as_intrinsic(c);
                nir_builder_instr_insert(b, c);
                nir_pop_if(b, NULL);

                if (has_dest) {
                        assert(c_intr->dest.is_ssa);
                        nir_ssa_def *c_ssa = &c_intr->dest.ssa;
                        res = nir_if_phi(b, c_ssa, res);
                }
        }

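        /* Point every use of the original access at the phi-merged result,
         * then delete the now-dead divergent instruction. */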
        if (has_dest)
                nir_ssa_def_rewrite_uses(&intr->dest.ssa, res);

        nir_instr_remove(instr);
        return true;
}

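/* Entry point. `lanes` must be the hardware warp width so the if-chain
 * covers every possible lane ID. A sketch of a call site follows;
 * NIR_PASS_V and nir_divergence_analysis are real NIR API, but warp_width
 * is a stand-in for whatever value the driver actually passes:
 *
 *    NIR_PASS_V(shader, nir_divergence_analysis);
 *    NIR_PASS_V(shader, bi_lower_divergent_indirects, warp_width);
 */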
bool
bi_lower_divergent_indirects(nir_shader *shader, unsigned lanes)
{
        return nir_shader_instructions_pass(shader,
                        bi_lower_divergent_indirects_impl,
                        nir_metadata_none, &lanes);
}