/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_instruction_selection.h"

#include "common/ac_nir.h"
#include "common/sid.h"
#include "vulkan/radv_descriptor_set.h"

#include "nir_control_flow.h"

#include <vector>

namespace aco {

namespace {

bool
is_loop_header_block(nir_block* block)
{
   return block->cf_node.parent->type == nir_cf_node_loop &&
          block == nir_loop_first_block(nir_cf_node_as_loop(block->cf_node.parent));
}

/* similar to nir_block_is_unreachable(), but does not require dominance information */
bool
is_block_reachable(nir_function_impl* impl, nir_block* known_reachable, nir_block* block)
{
   if (block == nir_start_block(impl) || block == known_reachable)
      return true;

   /* Skip loop back-edges: a loop header is reachable iff its preheader is reachable. */
   if (is_loop_header_block(block)) {
      nir_loop* loop = nir_cf_node_as_loop(block->cf_node.parent);
      nir_block* preheader = nir_block_cf_tree_prev(nir_loop_first_block(loop));
      return is_block_reachable(impl, known_reachable, preheader);
   }

   set_foreach (block->predecessors, entry) {
      if (is_block_reachable(impl, known_reachable, (nir_block*)entry->key))
         return true;
   }

   return false;
}

/* Check whether the given SSA def is only used by cross-lane instructions. */
bool
only_used_by_cross_lane_instrs(nir_ssa_def* ssa, bool follow_phis = true)
{
   nir_foreach_use (src, ssa) {
      switch (src->parent_instr->type) {
      case nir_instr_type_alu: {
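         /* Look through 64-bit unpacks: the halves of a 64-bit value may be
          * read cross-lane after being split into two 32-bit values. */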
         nir_alu_instr* alu = nir_instr_as_alu(src->parent_instr);
         if (alu->op != nir_op_unpack_64_2x32_split_x && alu->op != nir_op_unpack_64_2x32_split_y)
            return false;
         if (!only_used_by_cross_lane_instrs(&alu->dest.dest.ssa, follow_phis))
            return false;

         continue;
      }
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(src->parent_instr);
         if (intrin->intrinsic != nir_intrinsic_read_invocation &&
             intrin->intrinsic != nir_intrinsic_read_first_invocation &&
             intrin->intrinsic != nir_intrinsic_lane_permute_16_amd)
            return false;

         continue;
      }
      case nir_instr_type_phi: {
         /* Don't follow more than 1 phi; this avoids infinite loops. */
         if (!follow_phis)
            return false;

         nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr);
         if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false))
            return false;

         continue;
      }
      default: return false;
      }
   }

   return true;
}

/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This way, any SGPR live-out of the branch-less side can be
 * used without creating a linear phi in the invert or merge block. */
bool
sanitize_if(nir_function_impl* impl, nir_if* nif)
{
   // TODO: skip this if the condition is uniform and there are no divergent breaks/continues?

   nir_block* then_block = nir_if_last_then_block(nif);
   nir_block* else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) ||
                    !is_block_reachable(impl, nir_if_first_then_block(nif), then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) ||
                    !is_block_reachable(impl, nir_if_first_else_block(nif), else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty, return early: there is nothing to
    * move.
    */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards.  Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal.  Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block* last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block* first_continue_from_blk =
      else_jump ? nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

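/* Recursively sanitize nested control flow, innermost first. Returns true if
 * any if-statement was rewritten. */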
bool
sanitize_cf_list(nir_function_impl* impl, struct exec_list* cf_list)
{
   bool progress = false;
   foreach_list_typed (nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block: break;
      case nir_cf_node_if: {
         nir_if* nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, &nif->then_list);
         progress |= sanitize_cf_list(impl, &nif->else_list);
         progress |= sanitize_if(impl, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop* loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, &loop->body);
         break;
      }
      case nir_cf_node_function: unreachable("Invalid cf type");
      }
   }

   return progress;
}

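/* If range analysis can prove that the iadd computing this value cannot
 * overflow, set the no-unsigned-wrap flag so that instruction selection may
 * fold the addition into an instruction's constant offset field. */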
void
apply_nuw_to_ssa(isel_context* ctx, nir_ssa_def* ssa)
{
   nir_ssa_scalar scalar;
   scalar.def = ssa;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
      return;

   nir_alu_instr* add = nir_instr_as_alu(ssa->parent_instr);

   if (add->no_unsigned_wrap)
      return;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

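   /* If one source is a constant, move it to src1: the upper bound of a
    * constant is exact. */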
   if (nir_ssa_scalar_is_const(src0)) {
      nir_ssa_scalar tmp = src0;
      src0 = src1;
      src1 = tmp;
   }

   uint32_t src1_ub = nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, src1, &ctx->ub_config);
   add->no_unsigned_wrap =
      !nir_addition_might_overflow(ctx->shader, ctx->range_ht, src0, src1_ub, &ctx->ub_config);
}

void
apply_nuw_to_offsets(isel_context* ctx, nir_function_impl* impl)
{
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_push_constant:
            if (!nir_src_is_divergent(intrin->src[0]))
               apply_nuw_to_ssa(ctx, intrin->src[0].ssa);
            break;
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_ssbo:
            if (!nir_src_is_divergent(intrin->src[1]))
               apply_nuw_to_ssa(ctx, intrin->src[1].ssa);
            break;
         case nir_intrinsic_store_ssbo:
            if (!nir_src_is_divergent(intrin->src[2]))
               apply_nuw_to_ssa(ctx, intrin->src[2].ssa);
            break;
         default: break;
         }
      }
   }
}

RegClass
get_reg_class(isel_context* ctx, RegType type, unsigned components, unsigned bitsize)
{
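   /* Booleans are lane masks (one bit per invocation), kept in SGPRs sized
    * like the exec mask: one dword for wave32, two for wave64. */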
   if (bitsize == 1)
      return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
   else
      return RegClass::get(type, components * bitsize / 8u);
}

void
setup_vs_output_info(isel_context* ctx, nir_shader* nir,
                     const radv_vs_output_info* outinfo)
{
   ctx->export_clip_dists = outinfo->export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   /* GFX10+ early rasterization:
    * When there are no param exports in an NGG (or legacy VS) shader,
    * RADV sets NO_PC_EXPORT=1, which means the HW will start clipping and rasterization
    * as soon as it encounters a DONE pos export. When this happens, PS waves can launch
    * before the NGG (or VS) waves finish.
    */
   ctx->program->early_rast = ctx->program->chip_class >= GFX10 && outinfo->param_exports == 0;
}

void
setup_vs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_vs || ctx->stage == vertex_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->vs.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->program->info->so.num_outputs);
   }

   if (ctx->stage == vertex_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

void
setup_gs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
      ctx->program->config->lds_size =
         ctx->program->info->gs_ring_info.lds_size; /* Already in units of the alloc granularity */
   } else if (ctx->stage == vertex_geometry_ngg || ctx->stage == tess_eval_geometry_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->vs.outinfo);

      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
   }
}

void
setup_tcs_info(isel_context* ctx, nir_shader* nir, nir_shader* vs)
{
   ctx->tcs_in_out_eq = ctx->program->info->vs.tcs_in_out_eq;
   ctx->tcs_temp_only_inputs = ctx->program->info->vs.tcs_temp_only_input_mask;
   ctx->tcs_num_patches = ctx->program->info->num_tess_patches;
   ctx->program->config->lds_size = ctx->program->info->tcs.num_lds_blocks;
}

void
setup_tes_variables(isel_context* ctx, nir_shader* nir)
{
   ctx->tcs_num_patches = ctx->program->info->num_tess_patches;

   if (ctx->stage == tess_eval_vs || ctx->stage == tess_eval_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->tes.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->program->info->so.num_outputs);
   }

   if (ctx->stage == tess_eval_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

void
setup_ms_variables(isel_context* ctx, nir_shader* nir)
{
   setup_vs_output_info(ctx, nir, &ctx->program->info->ms.outinfo);

   ctx->program->config->lds_size =
      DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
   assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) < (32 * 1024));
}

void
setup_variables(isel_context* ctx, nir_shader* nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      break;
   }
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_TASK: {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_MESH: {
      setup_ms_variables(ctx, nir);
      break;
   }
   default: unreachable("Unhandled shader stage.");
   }

   /* Make sure we fit the available LDS space. */
   assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <=
          ctx->program->dev.lds_limit);
}

void
setup_nir(isel_context* ctx, nir_shader* nir)
{
   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

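   /* LCSSA guarantees that values live past a loop exit go through phis, and
    * scalarized phis map more directly to ACO's phi handling. */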
   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir, true);

   nir_function_impl* func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}

} /* end namespace */

void
init_context(isel_context* ctx, nir_shader* shader)
{
   nir_function_impl* impl = nir_shader_get_entrypoint(shader);
   ctx->shader = shader;

   /* Init NIR range analysis. */
   ctx->range_ht = _mesa_pointer_hash_table_create(NULL);
   ctx->ub_config.min_subgroup_size = 64;
   ctx->ub_config.max_subgroup_size = 64;
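   /* Assume wave64; compute shaders may override this with a required
    * subgroup size. */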
   if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->program->info->cs.subgroup_size) {
      ctx->ub_config.min_subgroup_size = ctx->program->info->cs.subgroup_size;
      ctx->ub_config.max_subgroup_size = ctx->program->info->cs.subgroup_size;
   }
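   /* Conservative API limits, used by nir_unsigned_upper_bound() to bound
    * system values. */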
   ctx->ub_config.max_workgroup_invocations = 2048;
   ctx->ub_config.max_workgroup_count[0] = 65535;
   ctx->ub_config.max_workgroup_count[1] = 65535;
   ctx->ub_config.max_workgroup_count[2] = 65535;
   ctx->ub_config.max_workgroup_size[0] = 2048;
   ctx->ub_config.max_workgroup_size[1] = 2048;
   ctx->ub_config.max_workgroup_size[2] = 2048;
   for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
      unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
      unsigned dfmt = attrib_format & 0xf;
      unsigned nfmt = (attrib_format >> 4) & 0x7;

      uint32_t max = UINT32_MAX;
      if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
         max = 0x3f800000u;
      } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT || nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
         bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
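         /* For uscaled formats, the bound is the float bit-pattern of the
          * largest representable integer, e.g. 0x437f0000 == 255.0f. */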
         switch (dfmt) {
         case V_008F0C_BUF_DATA_FORMAT_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8_8_8: max = uscaled ? 0x437f0000u : UINT8_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
         case V_008F0C_BUF_DATA_FORMAT_2_10_10_10: max = uscaled ? 0x447fc000u : 1023; break;
         case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         case V_008F0C_BUF_DATA_FORMAT_11_11_10: max = uscaled ? 0x44ffe000u : 2047; break;
         case V_008F0C_BUF_DATA_FORMAT_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16_16_16: max = uscaled ? 0x477fff00u : UINT16_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32_32: max = uscaled ? 0x4f800000u : UINT32_MAX; break;
         }
      }
      ctx->ub_config.vertex_attrib_max[i] = max;
   }

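   /* nir_opt_uniform_atomics and the register-class assignment below both
    * depend on up-to-date divergence information. */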
   nir_divergence_analysis(shader);
   nir_opt_uniform_atomics(shader);

   apply_nuw_to_offsets(ctx, impl);

   /* sanitize control flow */
   sanitize_cf_list(impl, &impl->body);
   nir_metadata_preserve(impl, nir_metadata_none);

   /* we'll need these for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!ctx->stage.has(SWStage::GSCopy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

   ctx->first_temp_id = ctx->program->peekAllocationId();
   ctx->program->allocateRange(impl->ssa_alloc);
   RegClass* regclasses = ctx->program->temp_rc.data() + ctx->first_temp_id;

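   /* Maps NIR block indices to ACO block indices; filled in during isel. */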
   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

   /* TODO: make this recursive to improve compile times */
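   /* Iterate to a fixed point: a loop-header phi can depend on a back-edge
    * value that is only assigned a VGPR class in a later iteration. */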
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block (block, impl) {
         nir_foreach_instr (instr, block) {
            switch (instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr* alu_instr = nir_instr_as_alu(instr);
               RegType type =
                  nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
               switch (alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fmulz:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_ffma:
               case nir_op_ffmaz:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f16:
               case nir_op_f2f16_rtz:
               case nir_op_f2f16_rtne:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f16:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f16:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16_split:
               case nir_op_pack_unorm_2x16:
               case nir_op_pack_snorm_2x16:
               case nir_op_pack_uint_2x16:
               case nir_op_pack_sint_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index_amd:
               case nir_op_cube_face_coord_amd:
               case nir_op_sad_u8x4:
               case nir_op_udot_4x8_uadd:
               case nir_op_sdot_4x8_iadd:
               case nir_op_udot_4x8_uadd_sat:
               case nir_op_sdot_4x8_iadd_sat:
               case nir_op_udot_2x16_uadd:
               case nir_op_sdot_2x16_iadd:
               case nir_op_udot_2x16_uadd_sat:
               case nir_op_sdot_2x16_iadd_sat: type = RegType::vgpr; break;
               case nir_op_f2i16:
               case nir_op_f2u16:
               case nir_op_f2i32:
               case nir_op_f2u32:
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i8:
               case nir_op_b2i16:
               case nir_op_b2i32:
               case nir_op_b2i64:
               case nir_op_b2b32:
               case nir_op_b2f16:
               case nir_op_b2f32:
               case nir_op_mov: break;
               case nir_op_iadd:
               case nir_op_iadd_sat:
               case nir_op_uadd_sat:
               case nir_op_isub:
               case nir_op_imul:
               case nir_op_imin:
               case nir_op_imax:
               case nir_op_umin:
               case nir_op_umax:
               case nir_op_ishl:
               case nir_op_ishr:
               case nir_op_ushr:
                  /* packed 16-bit instructions have to be VGPR */
                  type = alu_instr->dest.dest.ssa.num_components == 2 ? RegType::vgpr : type;
                  FALLTHROUGH;
               default:
                  for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                     if (regclasses[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, alu_instr->dest.dest.ssa.num_components,
                                           alu_instr->dest.dest.ssa.bit_size);
               regclasses[alu_instr->dest.dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_load_const: {
               unsigned num_components = nir_instr_as_load_const(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_load_const(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_load_const(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr* intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               RegType type = RegType::sgpr;
               switch (intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_workgroup_id:
               case nir_intrinsic_load_num_workgroups:
               case nir_intrinsic_load_ray_launch_size:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_ballot:
               case nir_intrinsic_load_ring_tess_factors_amd:
               case nir_intrinsic_load_ring_tess_factors_offset_amd:
               case nir_intrinsic_load_ring_tess_offchip_amd:
               case nir_intrinsic_load_ring_tess_offchip_offset_amd:
               case nir_intrinsic_load_ring_esgs_amd:
               case nir_intrinsic_load_ring_es2gs_offset_amd:
               case nir_intrinsic_bindless_image_samples:
               case nir_intrinsic_has_input_vertex_amd:
               case nir_intrinsic_has_input_primitive_amd:
               case nir_intrinsic_load_workgroup_num_input_vertices_amd:
               case nir_intrinsic_load_workgroup_num_input_primitives_amd:
               case nir_intrinsic_load_shader_query_enabled_amd:
               case nir_intrinsic_load_cull_front_face_enabled_amd:
               case nir_intrinsic_load_cull_back_face_enabled_amd:
               case nir_intrinsic_load_cull_ccw_amd:
               case nir_intrinsic_load_cull_small_primitives_enabled_amd:
               case nir_intrinsic_load_cull_any_enabled_amd:
               case nir_intrinsic_load_viewport_x_scale:
               case nir_intrinsic_load_viewport_y_scale:
               case nir_intrinsic_load_viewport_x_offset:
               case nir_intrinsic_load_viewport_y_offset:
               case nir_intrinsic_load_force_vrs_rates_amd:
               case nir_intrinsic_load_scalar_arg_amd:
               case nir_intrinsic_load_smem_amd: type = RegType::sgpr; break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_frag_shading_rate:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_byte_permute_amd:
               case nir_intrinsic_lane_permute_16_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_ssbo_atomic_fmin:
               case nir_intrinsic_ssbo_atomic_fmax:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_global_atomic_fmin:
               case nir_intrinsic_global_atomic_fmax:
               case nir_intrinsic_bindless_image_atomic_add:
               case nir_intrinsic_bindless_image_atomic_umin:
               case nir_intrinsic_bindless_image_atomic_imin:
               case nir_intrinsic_bindless_image_atomic_umax:
               case nir_intrinsic_bindless_image_atomic_imax:
               case nir_intrinsic_bindless_image_atomic_and:
               case nir_intrinsic_bindless_image_atomic_or:
               case nir_intrinsic_bindless_image_atomic_xor:
               case nir_intrinsic_bindless_image_atomic_exchange:
               case nir_intrinsic_bindless_image_atomic_comp_swap:
               case nir_intrinsic_bindless_image_atomic_fmin:
               case nir_intrinsic_bindless_image_atomic_fmax:
               case nir_intrinsic_bindless_image_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_shared_atomic_fadd:
               case nir_intrinsic_shared_atomic_fmin:
               case nir_intrinsic_shared_atomic_fmax:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
               case nir_intrinsic_load_buffer_amd:
               case nir_intrinsic_load_tess_rel_patch_id_amd:
               case nir_intrinsic_load_gs_vertex_offset_amd:
               case nir_intrinsic_load_initial_edgeflags_amd:
               case nir_intrinsic_load_packed_passthrough_primitive_amd:
               case nir_intrinsic_gds_atomic_add_amd:
               case nir_intrinsic_bvh64_intersect_ray_amd:
               case nir_intrinsic_load_cull_small_prim_precision_amd:
               case nir_intrinsic_load_vector_arg_amd: type = RegType::vgpr; break;
               case nir_intrinsic_load_shared:
                  /* When the result of these loads is only used by cross-lane instructions,
                   * it is beneficial to use a VGPR destination, because it allows the
                   * s_waitcnt to be placed further down, which decreases latency.
                   */
                  if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) {
                     type = RegType::vgpr;
                     break;
                  }
                  FALLTHROUGH;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
               case nir_intrinsic_reduce:
               case nir_intrinsic_load_sbt_amd:
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_load_global_constant:
                  type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
                       i++) {
                     if (regclasses[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components,
                                           intrinsic->dest.ssa.bit_size);
               regclasses[intrinsic->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               RegType type = nir_dest_is_divergent(tex->dest) ? RegType::vgpr : RegType::sgpr;

               if (tex->op == nir_texop_texture_samples) {
                  assert(!tex->dest.ssa.divergent);
               }

               RegClass rc =
                  get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
               regclasses[tex->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_parallel_copy: {
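               /* A parallel copy just renames values, so each destination
                * inherits the register class of its source. */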
               nir_foreach_parallel_copy_entry (entry, nir_instr_as_parallel_copy(instr)) {
                  regclasses[entry->dest.ssa.index] = regclasses[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned num_components = nir_instr_as_ssa_undef(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_ssa_undef(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_ssa_undef(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type = RegType::sgpr;
               unsigned num_components = phi->dest.ssa.num_components;
               assert((phi->dest.ssa.bit_size != 1 || num_components == 1) &&
                      "Multiple components not supported on boolean phis.");

               if (nir_dest_is_divergent(phi->dest)) {
                  type = RegType::vgpr;
               } else {
                  nir_foreach_phi_src (src, phi) {
                     if (regclasses[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
               }

               RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size);
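               /* If the phi's class changed, uses visited earlier in this pass
                * may be stale, so run another iteration. */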
               if (rc != regclasses[phi->dest.ssa.index])
                  done = false;
               regclasses[phi->dest.ssa.index] = rc;
               break;
            }
            default: break;
            }
         }
      }
   }

   ctx->program->config->spi_ps_input_ena = ctx->program->info->ps.spi_ps_input;
   ctx->program->config->spi_ps_input_addr = ctx->program->info->ps.spi_ps_input;

   ctx->cf_info.nir_to_aco = std::move(nir_to_aco);

   /* align and copy constant data */
   while (ctx->program->constant_data.size() % 4u)
      ctx->program->constant_data.push_back(0);
   ctx->constant_data_offset = ctx->program->constant_data.size();
   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
                                      (uint8_t*)shader->constant_data,
                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
}

void
cleanup_context(isel_context* ctx)
{
   _mesa_hash_table_destroy(ctx->range_ht, NULL);
}

isel_context
setup_isel_context(Program* program, unsigned shader_count, struct nir_shader* const* shaders,
                   ac_shader_config* config, const struct radv_nir_compiler_options* options,
                   const struct radv_shader_info* info,
                   const struct radv_shader_args* args, bool is_gs_copy_shader)
{
   SWStage sw_stage = SWStage::None;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX: sw_stage = sw_stage | SWStage::VS; break;
      case MESA_SHADER_TESS_CTRL: sw_stage = sw_stage | SWStage::TCS; break;
      case MESA_SHADER_TESS_EVAL: sw_stage = sw_stage | SWStage::TES; break;
      case MESA_SHADER_GEOMETRY:
         sw_stage = sw_stage | (is_gs_copy_shader ? SWStage::GSCopy : SWStage::GS);
         break;
      case MESA_SHADER_FRAGMENT: sw_stage = sw_stage | SWStage::FS; break;
      case MESA_SHADER_COMPUTE: sw_stage = sw_stage | SWStage::CS; break;
      case MESA_SHADER_TASK: sw_stage = sw_stage | SWStage::TS; break;
      case MESA_SHADER_MESH: sw_stage = sw_stage | SWStage::MS; break;
      default: unreachable("Shader stage not implemented");
      }
   }
   bool gfx9_plus = options->chip_class >= GFX9;
   bool ngg = info->is_ngg && options->chip_class >= GFX10;
   HWStage hw_stage{};
   if (sw_stage == SWStage::VS && info->vs.as_es && !ngg)
      hw_stage = HWStage::ES;
   else if (sw_stage == SWStage::VS && !info->vs.as_ls && !ngg)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::VS && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: VS without GS uses the HW GS stage */
   else if (sw_stage == SWStage::GS)
      hw_stage = HWStage::GS;
   else if (sw_stage == SWStage::FS)
      hw_stage = HWStage::FS;
   else if (sw_stage == SWStage::CS)
      hw_stage = HWStage::CS;
   else if (sw_stage == SWStage::GSCopy)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::TS)
      hw_stage = HWStage::CS; /* Task shaders are implemented with compute shaders. */
   else if (sw_stage == SWStage::MS)
      hw_stage = HWStage::NGG; /* Mesh shaders only work on NGG and on GFX10.3+. */
   else if (sw_stage == SWStage::VS_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX6-9: VS+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::VS_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: VS+GS merged into an NGG GS */
   else if (sw_stage == SWStage::VS && info->vs.as_ls)
      hw_stage = HWStage::LS; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (sw_stage == SWStage::TCS)
      hw_stage = HWStage::HS; /* GFX6-8: TCS is a Hull Shader */
   else if (sw_stage == SWStage::VS_TCS)
      hw_stage = HWStage::HS; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (sw_stage == SWStage::TES && !info->tes.as_es && !ngg)
      hw_stage = HWStage::VS; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES && !info->tes.as_es && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: TES without GS */
   else if (sw_stage == SWStage::TES && info->tes.as_es && !ngg)
      hw_stage = HWStage::ES; /* GFX6-8: TES is an Export Shader */
   else if (sw_stage == SWStage::TES_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: TES+GS merged into an NGG GS */
   else
      unreachable("Shader stage not implemented");

   init_program(program, Stage{hw_stage, sw_stage}, info, options->chip_class,
                options->family, options->wgp_mode, config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = options;
   ctx.stage = program->stage;

   program->workgroup_size = program->info->workgroup_size;
   assert(program->workgroup_size);

   /* Mesh shading only works on GFX10.3+. */
   ASSERTED bool mesh_shading = ctx.stage.has(SWStage::TS) || ctx.stage.has(SWStage::MS);
   assert(!mesh_shading || ctx.program->chip_class >= GFX10_3);

   if (ctx.stage == tess_control_hs)
      setup_tcs_info(&ctx, shaders[0], NULL);
   else if (ctx.stage == vertex_tess_control_hs)
      setup_tcs_info(&ctx, shaders[1], shaders[0]);

   calc_min_waves(program);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], &program->info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader* nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

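   /* Per-wave scratch is allocated in 1 KiB granules. */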
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->kind = block_kind_top_level;

   return ctx;
}

} // namespace aco