/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/debug.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3_shader.h"
static const nir_shader_compiler_options options = {
   .lower_fpow = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_ffract = true,
   .lower_fmod = true,
   .lower_fdiv = true,
   .lower_isign = true,
   .lower_ldexp = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_high = true,
   .lower_mul_2x32_64 = true,
   .fuse_ffma16 = true,
   .fuse_ffma32 = true,
   .fuse_ffma64 = true,
   .vertex_id_zero_based = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_insert_byte = true,
   .lower_insert_word = true,
   .lower_helper_invocation = true,
   .lower_bitfield_insert_to_shifts = true,
   .lower_bitfield_extract_to_shifts = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_pack_split = true,
   .use_interpolated_input_intrinsics = true,
   .lower_rotate = true,
   .lower_to_scalar = true,
   .has_imul24 = true,
   .has_fsub = true,
   .has_isub = true,
   .lower_wpos_pntc = true,
   .lower_cs_local_index_from_id = true,

   /* Only needed for the spirv_to_nir() pass done in ir3_cmdline.c
    * but that should be harmless for GL since 64b is not
    * supported there.
    */
   .lower_int64_options = (nir_lower_int64_options)~0,
   .lower_uniforms_to_ubo = true,
   .use_scoped_barrier = true,
};

/* we don't want to lower vertex_id to _zero_based on newer gpus: */
static const nir_shader_compiler_options options_a6xx = {
   .lower_fpow = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_ffract = true,
   .lower_fmod = true,
   .lower_fdiv = true,
   .lower_isign = true,
   .lower_ldexp = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_high = true,
   .lower_mul_2x32_64 = true,
   .fuse_ffma16 = true,
   .fuse_ffma32 = true,
   .fuse_ffma64 = true,
   .vertex_id_zero_based = false,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_insert_byte = true,
   .lower_insert_word = true,
   .lower_helper_invocation = true,
   .lower_bitfield_insert_to_shifts = true,
   .lower_bitfield_extract_to_shifts = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_pack_split = true,
   .use_interpolated_input_intrinsics = true,
   .lower_rotate = true,
   .vectorize_io = true,
   .lower_to_scalar = true,
   .has_imul24 = true,
   .has_fsub = true,
   .has_isub = true,
   .max_unroll_iterations = 32,
   .force_indirect_unrolling = nir_var_all,
   .lower_wpos_pntc = true,
   .lower_cs_local_index_from_id = true,

   /* Only needed for the spirv_to_nir() pass done in ir3_cmdline.c
    * but that should be harmless for GL since 64b is not
    * supported there.
    */
   .lower_int64_options = (nir_lower_int64_options)~0,
   .lower_uniforms_to_ubo = true,
   .lower_device_index_to_zero = true,
   .use_scoped_barrier = true,
};

const nir_shader_compiler_options *
ir3_get_compiler_options(struct ir3_compiler *compiler)
{
   if (compiler->gen >= 6)
      return &options_a6xx;
   return &options;
}

static bool
ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                             unsigned bit_size, unsigned num_components,
                             nir_intrinsic_instr *low,
                             nir_intrinsic_instr *high, void *data)
{
   assert(bit_size >= 8);
   if (bit_size != 32)
      return false;
   unsigned byte_size = bit_size / 8;

   int size = num_components * byte_size;

   /* Don't care about alignment past vec4. */
   assert(util_is_power_of_two_nonzero(align_mul));
   align_mul = MIN2(align_mul, 16);
   align_offset &= 15;

   /* Our offset alignment should always be at least 4 bytes */
   if (align_mul < 4)
      return false;

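   /* Illustrative example: with align_mul=4 and align_offset=0 the load may
    * start as late as byte 12 within a vec4 slot (worst_start_offset =
    * 16 - 4 + 0 = 12), so only a single 32-bit component is guaranteed not
    * to straddle a vec4 boundary, while a vec4-aligned load (align_mul=16,
    * align_offset=0) can vectorize the full 16 bytes.
    */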
   unsigned worst_start_offset = 16 - align_mul + align_offset;
   if (worst_start_offset + size > 16)
      return false;

   return true;
}

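/* Convenience wrappers around NIR_PASS: OPT() runs a pass and evaluates to
 * true if the pass reported progress, OPT_V() runs a pass without tracking
 * progress.
 */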
#define OPT(nir, pass, ...)                                                    \
   ({                                                                          \
      bool this_progress = false;                                              \
      NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);                       \
      this_progress;                                                           \
   })

#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

void
ir3_optimize_loop(struct ir3_compiler *compiler, nir_shader *s)
{
   bool progress;
   unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
                         (s->options->lower_flrp32 ? 32 : 0) |
                         (s->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_dead_write_vars);
      progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
      progress |= OPT(s, nir_lower_phis_to_scalar, false);

      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_deref);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
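      /* Global code motion is opt-in via the GCM env var: 1 runs
       * nir_opt_gcm with value-numbering enabled, 2 runs the more
       * conservative variant.
       */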
      static int gcm = -1;
      if (gcm == -1)
         gcm = env_var_as_unsigned("GCM", 0);
      if (gcm == 1)
         progress |= OPT(s, nir_opt_gcm, true);
      else if (gcm == 2)
         progress |= OPT(s, nir_opt_gcm, false);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      /* NOTE: GS lowering inserts an output var with varying slot that
       * is larger than VARYING_SLOT_MAX (ie. GS_VERTEX_FLAGS_IR3),
       * which triggers asserts in nir_shader_gather_info().  To work
       * around that skip lowering phi precision for GS.
       *
       * Calling nir_shader_gather_info() late also seems to cause
       * problems for tess lowering, for now since we only enable
       * fp16/int16 for frag and compute, skip phi precision lowering
       * for other stages.
       */
      if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
          (s->info.stage == MESA_SHADER_COMPUTE)) {
         progress |= OPT(s, nir_opt_phi_precision);
      }
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_lower_alu);
      progress |= OPT(s, nir_lower_pack);
      progress |= OPT(s, nir_opt_constant_folding);

      nir_load_store_vectorize_options vectorize_opts = {
         .modes = nir_var_mem_ubo,
         .callback = ir3_nir_should_vectorize_mem,
         .robust_modes = compiler->robust_ubo_access ? nir_var_mem_ubo : 0,
      };
      progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);

      if (lower_flrp != 0) {
         if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
            OPT(s, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress |= true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_loop_unroll);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}

static bool
should_split_wrmask(const nir_instr *instr, const void *data)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   switch (intr->intrinsic) {
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return true;
   default:
      return false;
   }
}

static bool
ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic ==
             nir_intrinsic_get_ssbo_size;
}

static nir_ssa_def *
ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
{
   uint8_t ssbo_size_to_bytes_shift = *(uint8_t *)data;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return nir_ishl(b, &intr->dest.ssa, nir_imm_int(b, ssbo_size_to_bytes_shift));
}

/**
 * The resinfo opcode we have for getting the SSBO size on a6xx returns a byte
 * length divided by IBO_0_FMT, while the NIR intrinsic coming in is a number of
 * bytes. Switch things so the NIR intrinsic in our backend means dwords.
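 *
 * For example, with 32-bit storage the hardware value is a count of dwords,
 * so the byte size is reconstructed as (hw_value << 2); with storage_16bit
 * the unit is 16 bits and the shift is 1.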
 */
static bool
ir3_nir_lower_ssbo_size(nir_shader *s, bool storage_16bit)
{
   uint8_t ssbo_size_to_bytes_shift = storage_16bit ? 1 : 2;
   return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
                                        ir3_nir_lower_ssbo_size_instr,
                                        &ssbo_size_to_bytes_shift);
}

void
ir3_nir_lower_io_to_temporaries(nir_shader *s)
{
   /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
    * by the hardware pre-loading registers at the beginning and then reading
    * them at the end, so we can't access them indirectly except through
    * normal register-indirect accesses, and therefore ir3 doesn't support
    * indirect accesses on those.  Other i/o is lowered in ir3_nir_lower_tess,
    * and indirects work just fine for those.  GS outputs may be consumed by
    * VPC, but have their own lowering in ir3_nir_lower_gs() which does
    * something similar to nir_lower_io_to_temporaries so we shouldn't need
    * to lower them.
    *
    * Note: this might be a little inefficient for VS or TES outputs when
    * the next stage isn't an FS, but it probably doesn't make sense to
    * depend on the next stage before variant creation.
    *
    * TODO: for gallium, mesa/st also does some redundant lowering, including
    * running this pass for GS inputs/outputs which we don't want but not
    * including TES outputs or FS inputs which we do need.  We should probably
    * stop doing that once we're sure all drivers are doing their own
    * indirect i/o lowering.
    */
   bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
                      s->info.stage == MESA_SHADER_FRAGMENT;
   bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
                       s->info.stage != MESA_SHADER_GEOMETRY;
   if (lower_input || lower_output) {
      NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
                 lower_output, lower_input);

      /* nir_lower_io_to_temporaries() creates global variables and copy
       * instructions which need to be cleaned up.
       */
      NIR_PASS_V(s, nir_split_var_copies);
      NIR_PASS_V(s, nir_lower_var_copies);
      NIR_PASS_V(s, nir_lower_global_vars_to_local);
   }

   /* Regardless of the above, we need to lower indirect references to
    * compact variables such as clip/cull distances because due to how
    * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
    * vec4 components.  nir_lower_indirect_derefs has a special case for
    * compact variables, so it will actually lower them even though we pass
    * in 0 modes.
    *
    * Using temporaries would be slightly better but
    * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
    */
   NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
}

void
ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
{
   struct nir_lower_tex_options tex_options = {
      .lower_rect = 0,
      .lower_tg4_offsets = true,
   };

   if (compiler->gen >= 4) {
      /* a4xx seems to have *no* sam.p */
      tex_options.lower_txp = ~0; /* lower all txp */
   } else {
      /* a3xx just needs to avoid sam.p for 3d tex */
      tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
   }

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   if (s->info.stage == MESA_SHADER_GEOMETRY)
      NIR_PASS_V(s, ir3_nir_lower_gs);

   NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);

   OPT_V(s, nir_lower_tex, &tex_options);
   OPT_V(s, nir_lower_load_const_to_scalar);
   if (compiler->gen < 5)
      OPT_V(s, ir3_nir_lower_tg4_to_tex);

   ir3_optimize_loop(compiler, s);

   /* do idiv lowering after first opt loop to get a chance to propagate
    * constants for divide by immed power-of-two:
    */
   nir_lower_idiv_options idiv_options = {
      .imprecise_32bit_lowering = true,
      .allow_fp16 = true,
   };
   const bool idiv_progress = OPT(s, nir_lower_idiv, &idiv_options);

   if (idiv_progress)
      ir3_optimize_loop(compiler, s);

   OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   /* st_program.c's parameter list optimization requires that future nir
    * variants don't reallocate the uniform storage, so we have to remove
    * uniforms that occupy storage.  But we don't want to remove samplers,
    * because they're needed for YUV variant lowering.
    */
   nir_foreach_uniform_variable_safe (var, s) {
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
   nir_validate_shader(s, "after uniform var removal");

   nir_sweep(s);
}

static bool
lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
          intr->intrinsic == nir_intrinsic_load_subgroup_id ||
          intr->intrinsic == nir_intrinsic_load_num_subgroups;
}

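/* Lower the subgroup system values in terms of the local invocation index,
 * relying on the subgroup size being a power of two (its log2 is provided
 * by the load_subgroup_id_shift_ir3 intrinsic):
 *
 *    subgroup_invocation = local_invocation_index & (subgroup_size - 1)
 *    subgroup_id         = local_invocation_index >> shift
 *    num_subgroups       = DIV_ROUND_UP(workgroup_size, subgroup_size)
 *                        = 1 + ((workgroup_size - 1) >> shift)
 */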
static nir_ssa_def *
lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
      return nir_iand(
         b, nir_load_local_invocation_index(b),
         nir_isub(b, nir_load_subgroup_size(b), nir_imm_int(b, 1)));
   } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
      return nir_ishr(b, nir_load_local_invocation_index(b),
                      nir_load_subgroup_id_shift_ir3(b));
   } else {
      assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
      /* If the workgroup size is constant,
       * nir_lower_compute_system_values() will replace local_size with a
       * constant so this can mostly be constant folded away.
       */
      nir_ssa_def *local_size = nir_load_workgroup_size(b);
      nir_ssa_def *size =
         nir_imul24(b, nir_channel(b, local_size, 0),
                    nir_imul24(b, nir_channel(b, local_size, 1),
                               nir_channel(b, local_size, 2)));
      nir_ssa_def *one = nir_imm_int(b, 1);
      return nir_iadd(b, one,
                      nir_ishr(b, nir_isub(b, size, one),
                               nir_load_subgroup_id_shift_ir3(b)));
   }
}

static bool
ir3_nir_lower_subgroup_id_cs(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader, lower_subgroup_id_filter,
                                        lower_subgroup_id, NULL);
}

static const nir_lower_idiv_options idiv_options = {
   .imprecise_32bit_lowering = true,
   .allow_fp16 = true,
};

/**
 * Late passes that need to be done after pscreen->finalize_nir()
 */
void
ir3_nir_post_finalize(struct ir3_compiler *compiler, nir_shader *s)
{
   NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
              ir3_glsl_type_size, (nir_lower_io_options)0);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
      NIR_PASS_V(s, ir3_nir_move_varying_inputs);
      NIR_PASS_V(s, nir_lower_fb_read);
   }

   if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
       !(ir3_shader_debug & IR3_DBG_NOFP16)) {
      NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
   }

   if (s->info.stage == MESA_SHADER_COMPUTE) {
      bool progress = false;
      NIR_PASS(progress, s, nir_lower_subgroups,
               &(nir_lower_subgroups_options){
                  .subgroup_size = 128,
                  .ballot_bit_size = 32,
                  .ballot_components = 4,
                  .lower_to_scalar = true,
                  .lower_vote_eq = true,
                  .lower_subgroup_masks = true,
                  .lower_read_invocation_to_cond = true,
               });

      progress = false;
      NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs);

      /* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
       * we need to lower again.
       */
      if (progress)
         NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
   }

   /* we cannot ensure that ir3_finalize_nir() is only called once, so
    * we also need to do any run-once workarounds here:
    */
   OPT_V(s, ir3_nir_apply_trig_workarounds);

   nir_lower_image_options lower_image_opts = {
      .lower_cube_size = true,
   };
   NIR_PASS_V(s, nir_lower_image, &lower_image_opts);
   NIR_PASS_V(s, nir_lower_idiv, &idiv_options); /* idiv generated by cube lowering */

   if (compiler->gen >= 6)
      OPT_V(s, ir3_nir_lower_ssbo_size, compiler->storage_16bit);

   ir3_optimize_loop(compiler, s);
}

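/* Replace loads of the gl_Layer / gl_ViewportIndex FS inputs with a
 * constant zero when the shader key says they are known to be zero
 * (presumably because the previous stage never writes them).
 */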
static bool
ir3_nir_lower_view_layer_id(nir_shader *nir, bool layer_zero, bool view_zero)
{
   unsigned layer_id_loc = ~0, view_id_loc = ~0;
   nir_foreach_shader_in_variable (var, nir) {
      if (var->data.location == VARYING_SLOT_LAYER)
         layer_id_loc = var->data.driver_location;
      if (var->data.location == VARYING_SLOT_VIEWPORT)
         view_id_loc = var->data.driver_location;
   }

   assert(!layer_zero || layer_id_loc != ~0);
   assert(!view_zero || view_id_loc != ~0);

   bool progress = false;
   nir_builder b;

   nir_foreach_function (func, nir) {
      nir_builder_init(&b, func->impl);

      nir_foreach_block (block, func->impl) {
         nir_foreach_instr_safe (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic != nir_intrinsic_load_input)
               continue;

            unsigned base = nir_intrinsic_base(intrin);
            if (base != layer_id_loc && base != view_id_loc)
               continue;

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *zero = nir_imm_int(&b, 0);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, zero);
            nir_instr_remove(&intrin->instr);
            progress = true;
         }
      }

      if (progress) {
         nir_metadata_preserve(
            func->impl, nir_metadata_block_index | nir_metadata_dominance);
      } else {
         nir_metadata_preserve(func->impl, nir_metadata_all);
      }
   }

   return progress;
}

void
ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
{
   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   bool progress = false;

   if (so->key.has_gs || so->key.tessellation) {
      switch (so->shader->type) {
      case MESA_SHADER_VERTEX:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                    so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_TESS_CTRL:
         NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      case MESA_SHADER_TESS_EVAL:
         NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
         if (so->key.has_gs)
            NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                       so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_GEOMETRY:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      default:
         break;
      }
   }

   if (s->info.stage == MESA_SHADER_VERTEX) {
      if (so->key.ucp_enables)
         progress |=
            OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, false, NULL);
   } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
      bool layer_zero =
         so->key.layer_zero && (s->info.inputs_read & VARYING_BIT_LAYER);
      bool view_zero =
         so->key.view_zero && (s->info.inputs_read & VARYING_BIT_VIEWPORT);

      if (so->key.ucp_enables && !so->shader->compiler->has_clip_cull)
         progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, false);
      if (layer_zero || view_zero)
         progress |= OPT(s, ir3_nir_lower_view_layer_id, layer_zero, view_zero);
   }

   /* Move large constant variables to the constants attached to the NIR
    * shader, which we will upload in the immediates range.  This generates
    * amuls, so we need to clean those up after.
    *
    * Passing no size_align, we would get packed values, which if we end up
    * having to load with LDC would result in extra reads to unpack from
    * straddling loads.  Align everything to vec4 to avoid that, though we
    * could theoretically do better.
    */
   OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
         32 /* bytes */);
   OPT_V(s, ir3_nir_lower_load_constant, so);

   if (!so->binning_pass)
      OPT_V(s, ir3_nir_analyze_ubo_ranges, so);

   progress |= OPT(s, ir3_nir_lower_ubo_loads, so);

   /* Lower large temporaries to scratch, which in Qualcomm terms is private
    * memory, to avoid excess register pressure.  This should happen after
    * nir_opt_large_constants, because loading from a UBO is much, much less
    * expensive.
    */
   if (so->shader->compiler->has_pvtmem) {
      progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
                      16 * 16 /* bytes */, glsl_get_natural_size_align_bytes);
   }

   /* Lower scratch writemasks */
   progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);

   OPT_V(s, nir_lower_amul, ir3_glsl_type_size);

   /* UBO offset lowering has to come after we've decided what will
    * be left as load_ubo
    */
   if (so->shader->compiler->gen >= 6)
      progress |= OPT(s, nir_lower_ubo_vec4);

   OPT_V(s, ir3_nir_lower_io_offsets);

   if (progress)
      ir3_optimize_loop(so->shader->compiler, s);

   /* Fixup indirect load_uniform's which end up with a const base offset
    * which is too large to encode.  Do this late(ish) so we actually
    * can differentiate indirect vs non-indirect.
    */
   if (OPT(s, ir3_nir_fixup_load_uniform))
      ir3_optimize_loop(so->shader->compiler, s);

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic.  Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = OPT(s, nir_opt_algebraic_late);
      OPT_V(s, nir_opt_constant_folding);
      OPT_V(s, nir_copy_prop);
      OPT_V(s, nir_opt_dce);
      OPT_V(s, nir_opt_cse);
   }

   OPT_V(s, nir_opt_sink, nir_move_const_undef);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   nir_sweep(s);

   /* Binning pass variants re-use the const_state of the corresponding
    * draw pass shader, so that same const emit can be re-used for both
    * passes:
    */
   if (!so->binning_pass)
      ir3_setup_const_state(s, so, ir3_const_state(so));
}

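/* Scan the shader for intrinsics that consume driver-provided constants:
 * image dimension consts (only needed on a5xx and earlier, and not for
 * non-coherent image loads) and the various driver params, growing
 * num_driver_params to cover the highest param used.
 */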
static void
ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader,
                           struct ir3_const_state *layout)
{
   nir_foreach_function (function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block (block, function->impl) {
         nir_foreach_instr (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            unsigned idx;

            switch (intr->intrinsic) {
            case nir_intrinsic_image_atomic_add:
            case nir_intrinsic_image_atomic_imin:
            case nir_intrinsic_image_atomic_umin:
            case nir_intrinsic_image_atomic_imax:
            case nir_intrinsic_image_atomic_umax:
            case nir_intrinsic_image_atomic_and:
            case nir_intrinsic_image_atomic_or:
            case nir_intrinsic_image_atomic_xor:
            case nir_intrinsic_image_atomic_exchange:
            case nir_intrinsic_image_atomic_comp_swap:
            case nir_intrinsic_image_load:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_size:
               if (compiler->gen < 6 &&
                   !(intr->intrinsic == nir_intrinsic_image_load &&
                     !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
                  idx = nir_src_as_uint(intr->src[0]);
                  if (layout->image_dims.mask & (1 << idx))
                     break;
                  layout->image_dims.mask |= (1 << idx);
                  layout->image_dims.off[idx] = layout->image_dims.count;
                  layout->image_dims.count += 3; /* three const per */
               }
               break;
            case nir_intrinsic_load_base_vertex:
            case nir_intrinsic_load_first_vertex:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
               break;
            case nir_intrinsic_load_base_instance:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_INSTID_BASE + 1);
               break;
            case nir_intrinsic_load_user_clip_plane:
               idx = nir_intrinsic_ucp_id(intr);
               layout->num_driver_params = MAX2(layout->num_driver_params,
                                                IR3_DP_UCP0_X + (idx + 1) * 4);
               break;
            case nir_intrinsic_load_num_workgroups:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
               break;
            case nir_intrinsic_load_workgroup_size:
               layout->num_driver_params = MAX2(layout->num_driver_params,
                                                IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
               break;
            case nir_intrinsic_load_base_workgroup_id:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_BASE_GROUP_Z + 1);
               break;
            case nir_intrinsic_load_subgroup_size:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_SUBGROUP_SIZE + 1);
               break;
            case nir_intrinsic_load_subgroup_id_shift_ir3:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_SUBGROUP_ID_SHIFT + 1);
               break;
            default:
               break;
            }
         }
      }
   }
}

/* Sets up the variant-dependent constant state for the ir3_shader.  Note
 * that it is also used from ir3_nir_analyze_ubo_ranges() to figure out the
 * maximum number of driver params that would eventually be used, to leave
 * space for this function to allocate the driver params.
 */
void
ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
                      struct ir3_const_state *const_state)
{
   struct ir3_compiler *compiler = v->shader->compiler;

   memset(&const_state->offsets, ~0, sizeof(const_state->offsets));

   ir3_nir_scan_driver_consts(compiler, nir, const_state);

   if ((compiler->gen < 5) && (v->shader->stream_output.num_outputs > 0)) {
      const_state->num_driver_params =
         MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
   }

   const_state->num_ubos = nir->info.num_ubos;

   debug_assert((const_state->ubo_state.size % 16) == 0);
   unsigned constoff = const_state->ubo_state.size / 16;
   unsigned ptrsz = ir3_pointer_size(compiler);

   if (const_state->num_ubos > 0) {
      const_state->offsets.ubo = constoff;
      constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
   }

   if (const_state->image_dims.count > 0) {
      unsigned cnt = const_state->image_dims.count;
      const_state->offsets.image_dims = constoff;
      constoff += align(cnt, 4) / 4;
   }

   if (const_state->num_driver_params > 0) {
      /* num_driver_params in dwords.  we only need to align to vec4s for the
       * common case of immediate constant uploads, but for indirect dispatch
       * the constants may also be indirect and so we have to align the area in
       * const space to that requirement.
       */
      const_state->num_driver_params = align(const_state->num_driver_params, 4);
      unsigned upload_unit = 1;
      if (v->type == MESA_SHADER_COMPUTE ||
          (const_state->num_driver_params >= IR3_DP_VTXID_BASE)) {
         upload_unit = compiler->const_upload_unit;
      }

      /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
      if (v->type == MESA_SHADER_VERTEX && compiler->gen >= 6)
         constoff = MAX2(constoff, 1);
      constoff = align(constoff, upload_unit);
      const_state->offsets.driver_param = constoff;

      constoff += align(const_state->num_driver_params / 4, upload_unit);
   }

   if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
       v->shader->stream_output.num_outputs > 0) {
      const_state->offsets.tfbo = constoff;
      constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
   }

   switch (v->type) {
   case MESA_SHADER_VERTEX:
      const_state->offsets.primitive_param = constoff;
      constoff += 1;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
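      /* align(constoff - 1, 4) + 3 rounds constoff up to 3 (mod 4): the
       * 5 dwords of primitive_param then end exactly at a vec4 boundary,
       * which (presumably by design) leaves primitive_map at constoff + 5
       * vec4-aligned.
       */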
      constoff = align(constoff - 1, 4) + 3;
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 5;
      constoff += 5 + DIV_ROUND_UP(v->input_size, 4);
      break;
   case MESA_SHADER_GEOMETRY:
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 1;
      constoff += 1 + DIV_ROUND_UP(v->input_size, 4);
      break;
   default:
      break;
   }

   const_state->offsets.immediate = constoff;

   assert(constoff <= ir3_max_const(v));
}