/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
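
/* e.g., setting a flag only when a condition holds (names below are
 * placeholders, not real ir3 flags):
 *
 *    instr->flags |= COND(use_half, SOME_HALF_FLAG);
 */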

#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__);         \
   } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs.  Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;    /* the current block */
   struct ir3_block *in_block; /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs;
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that; it still declares varyings as
    * inputs.  So we do all the input tracking normally and fix
    * things up after compile_instructions().
    */
   struct ir3_instruction *ij[IJ_COUNT];

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources: */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *patch_vertices_in;
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;
   struct ir3_instruction *rel_patch_id;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   /* Tracking for the max level of flow control (branchstack) needed
    * by a5xx+:
    */
   unsigned stack, max_stack;

   unsigned loop_id;
   unsigned loop_depth;

   /* A common pattern for indirect addressing is to request the
    * same address register multiple times.  To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value.
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];
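
   /* Lookups go through ir3_get_addr0() below, which takes the alignment
    * in components; with four tables the implied mapping is one table per
    * alignment 1..4 (an inference from the table count; see the .c file
    * for the exact indexing).
    */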

   /* The same for a1.x. We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   struct hash_table *sel_cond_conversions;

   /* Last dst array; for indirect accesses we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out each block's successors
    */
   struct hash_table *block_ht;

   /* maps nir_block at the top of a loop to ir3_block collecting continue
    * edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   unsigned prefetch_limit;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler front-end:
    */
   bool error;
};

struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
};

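/* These hooks abstract over per-generation differences in SSBO/image/global
 * access; the shared NIR-to-ir3 code dispatches through ctx->funcs, e.g.
 * (a sketch; the actual call sites live in the shared converter):
 *
 *    ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 */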
extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;

struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);
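
/* Typical lifecycle, sketched (error handling omitted):
 *
 *    struct ir3_context *ctx = ir3_context_init(compiler, so);
 *    ... emit instructions into ctx->ir ...
 *    ir3_context_free(ctx);
 */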

struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_ssa_def *dst, unsigned n);
struct ir3_instruction **ir3_get_dst(struct ir3_context *ctx, nir_dest *dst,
                                     unsigned n);
struct ir3_instruction *const *ir3_get_src(struct ir3_context *ctx,
                                           nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
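
/* The usual pattern when lowering an instruction that writes a NIR dest is
 * to fetch a dst array, fill in one ir3 instruction per component, then
 * release it (a sketch based on the declarations above; names illustrative):
 *
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &intr->dest, ncomp);
 *    for (unsigned i = 0; i < ncomp; i++)
 *       dst[i] = ...;
 *    ir3_put_dst(ctx, &intr->dest);
 */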
struct ir3_instruction *ir3_create_collect(struct ir3_block *block,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);

#define ir3_collect(block, ...)                                                \
   ({                                                                          \
      struct ir3_instruction *__arr[] = {__VA_ARGS__};                         \
      ir3_create_collect(block, __arr, ARRAY_SIZE(__arr));                     \
   })
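
/* e.g. gathering two scalar values into a vec2 src (names illustrative):
 *
 *    struct ir3_instruction *xy = ir3_collect(b, x, y);
 *
 * ir3_split_dest() above goes the other way, splitting a vecN result back
 * into per-component scalar instructions.
 */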

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

#define compile_assert(ctx, cond)                                              \
   do {                                                                        \
      if (!(cond))                                                             \
         ir3_context_error((ctx), "failed assert: " #cond "\n");               \
   } while (0)
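
/* e.g.: compile_assert(ctx, ncomp <= 4);  (name illustrative)
 *
 * On failure this reports the stringified condition through
 * ir3_context_error() rather than a bare assert().
 */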

struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);
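
/* A rough sketch of the expected array-access sequence (variable names are
 * illustrative; address is only needed for indirect access):
 *
 *    ir3_declare_array(ctx, reg);    // once per array-backed nir_register
 *    struct ir3_array *arr = ir3_get_array(ctx, reg);
 *    struct ir3_instruction *val =
 *       ir3_create_array_load(ctx, arr, n, address);
 *    ir3_create_array_store(ctx, arr, n, src, address);
 */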

static inline type_t
utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32:
      return TYPE_U32;
   case 16:
      return TYPE_U16;
   case 8:
      return TYPE_U8;
   default:
      unreachable("bad bitsize");
      return ~0;
   }
}

static inline type_t
utype_src(nir_src src)
{
   return utype_for_size(nir_src_bit_size(src));
}

static inline type_t
utype_dst(nir_dest dst)
{
   return utype_for_size(nir_dest_bit_size(dst));
}

#endif /* IR3_CONTEXT_H_ */
279