/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "si_compute.h"

#include "ac_rtld.h"
#include "amd_kernel_code_t.h"
#include "nir/tgsi_to_nir.h"
#include "si_build_pm4.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#define COMPUTE_DBG(sscreen, fmt, args...) \
   do { \
      if ((sscreen->debug_flags & DBG(COMPUTE))) \
         fprintf(stderr, fmt, ##args); \
   } while (0) /* no trailing semicolon, so the macro behaves as one statement */

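/* Mirrors the layout of the HSA kernel dispatch packet
 * (hsa_kernel_dispatch_packet_t); used to pass launch parameters to native
 * (PIPE_SHADER_IR_NATIVE) kernels via the dispatch-pointer user SGPRs. */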
struct dispatch_packet {
   uint16_t header;
   uint16_t setup;
   uint16_t workgroup_size_x;
   uint16_t workgroup_size_y;
   uint16_t workgroup_size_z;
   uint16_t reserved0;
   uint32_t grid_size_x;
   uint32_t grid_size_y;
   uint32_t grid_size_z;
   uint32_t private_segment_size;
   uint32_t group_segment_size;
   uint64_t kernel_object;
   uint64_t kernarg_address;
   uint64_t reserved2;
};

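/* Return the amd_kernel_code_t header stored at symbol_offset inside the
 * .text section of a native kernel binary, or NULL for non-native shaders
 * or if the binary cannot be parsed. */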
static const amd_kernel_code_t *si_compute_get_code_object(const struct si_compute *program,
                                                           uint64_t symbol_offset)
{
   const struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      return NULL;

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld,
                     (struct ac_rtld_open_info){.info = &sel->screen->info,
                                                .shader_type = MESA_SHADER_COMPUTE,
                                                .wave_size = program->shader.wave_size,
                                                .num_parts = 1,
                                                .elf_ptrs = &program->shader.binary.elf_buffer,
                                                .elf_sizes = &program->shader.binary.elf_size}))
      return NULL;

   const amd_kernel_code_t *result = NULL;
   const char *text;
   size_t size;
   if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
      goto out;

   if (symbol_offset + sizeof(amd_kernel_code_t) > size)
      goto out;

   result = (const amd_kernel_code_t *)(text + symbol_offset);

out:
   ac_rtld_close(&rtld);
   return result;
}

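/* Translate the kernel code object's packed COMPUTE_PGM_RSRC1/RSRC2 values
 * and scratch/LDS requirements into an ac_shader_config. */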
static void code_object_to_config(const amd_kernel_code_t *code_object,
                                  struct ac_shader_config *out_config)
{
   uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
   uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
   out_config->num_sgprs = code_object->wavefront_sgpr_count;
   out_config->num_vgprs = code_object->workitem_vgpr_count;
   out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
   out_config->rsrc1 = rsrc1;
   out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
   out_config->rsrc2 = rsrc2;
   out_config->scratch_bytes_per_wave =
      align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, void *gdata, int thread_index)
{
   struct si_compute *program = (struct si_compute *)job;
   struct si_shader_selector *sel = &program->sel;
   struct si_shader *shader = &program->shader;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
   struct si_screen *sscreen = sel->screen;

   assert(!debug->debug_message || debug->async);
   assert(thread_index >= 0);
   assert(thread_index < ARRAY_SIZE(sscreen->compiler));
   compiler = &sscreen->compiler[thread_index];

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   assert(program->ir_type == PIPE_SHADER_IR_NIR);
   si_nir_scan_shader(sel->nir, &sel->info);

   si_get_active_slot_masks(&sel->info, &sel->active_const_and_shader_buffers,
                            &sel->active_samplers_and_images);

   program->shader.is_monolithic = true;

   /* Variable block sizes need 10 bits (1 + log2(SI_MAX_VARIABLE_THREADS_PER_BLOCK)) per dim.
    * We pack them into a single user SGPR.
    */
   unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS + (sel->info.uses_grid_size ? 3 : 0) +
                         (sel->info.uses_variable_block_size ? 1 : 0) +
                         sel->info.base.cs.user_data_components_amd;

   /* Fast path for compute shaders - some descriptors passed via user SGPRs. */
   /* Shader buffers in user SGPRs. */
   for (unsigned i = 0; i < MIN2(3, sel->info.base.num_ssbos) && user_sgprs <= 12; i++) {
      user_sgprs = align(user_sgprs, 4);
      if (i == 0)
         sel->cs_shaderbufs_sgpr_index = user_sgprs;
      user_sgprs += 4;
      sel->cs_num_shaderbufs_in_user_sgprs++;
   }

   /* Images in user SGPRs. */
   unsigned non_msaa_images = u_bit_consecutive(0, sel->info.base.num_images) &
                              ~sel->info.base.msaa_images;

   for (unsigned i = 0; i < 3 && non_msaa_images & (1 << i); i++) {
      unsigned num_sgprs = sel->info.base.image_buffers & (1 << i) ? 4 : 8;

      if (align(user_sgprs, num_sgprs) + num_sgprs > 16)
         break;

      user_sgprs = align(user_sgprs, num_sgprs);
      if (i == 0)
         sel->cs_images_sgpr_index = user_sgprs;
      user_sgprs += num_sgprs;
      sel->cs_num_images_in_user_sgprs++;
   }
   sel->cs_images_num_sgprs = user_sgprs - sel->cs_images_sgpr_index;
   assert(user_sgprs <= 16);

   unsigned char ir_sha1_cache_key[20];
   si_get_ir_cache_key(sel, false, false, shader->wave_size, ir_sha1_cache_key);

   /* Try to load the shader from the shader cache. */
   simple_mtx_lock(&sscreen->shader_cache_mutex);

   if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
      si_shader_dump(sscreen, shader, debug, stderr, true);

      if (!si_shader_binary_upload(sscreen, shader, 0))
         program->shader.compilation_failed = true;
   } else {
      simple_mtx_unlock(&sscreen->shader_cache_mutex);

      if (!si_create_shader_variant(sscreen, compiler, &program->shader, debug)) {
         program->shader.compilation_failed = true;
         return;
      }

      bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;

      shader->config.rsrc1 = S_00B848_VGPRS((shader->config.num_vgprs - 1) /
                                            ((shader->wave_size == 32 ||
                                              sscreen->info.wave64_vgpr_alloc_granularity == 8) ? 8 : 4)) |
                             S_00B848_DX10_CLAMP(1) |
                             S_00B848_MEM_ORDERED(si_shader_mem_ordered(shader)) |
                             S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                             S_00B848_FLOAT_MODE(shader->config.float_mode);

      if (sscreen->info.chip_class < GFX10) {
         shader->config.rsrc1 |= S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
      }

      shader->config.rsrc2 = S_00B84C_USER_SGPR(user_sgprs) | S_00B84C_SCRATCH_EN(scratch_enabled) |
                             S_00B84C_TGID_X_EN(sel->info.uses_block_id[0]) |
                             S_00B84C_TGID_Y_EN(sel->info.uses_block_id[1]) |
                             S_00B84C_TGID_Z_EN(sel->info.uses_block_id[2]) |
                             S_00B84C_TG_SIZE_EN(sel->info.uses_subgroup_info) |
                             S_00B84C_TIDIG_COMP_CNT(sel->info.uses_thread_id[2]
                                                        ? 2
                                                        : sel->info.uses_thread_id[1] ? 1 : 0) |
                             S_00B84C_LDS_SIZE(shader->config.lds_size);

      simple_mtx_lock(&sscreen->shader_cache_mutex);
      si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, shader, true);
      simple_mtx_unlock(&sscreen->shader_cache_mutex);
   }

   ralloc_free(sel->nir);
   sel->nir = NULL;
}

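/* Create a compute CSO. TGSI is translated to NIR and both are compiled
 * asynchronously; native (code object) binaries are copied and uploaded
 * directly. */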
static void *si_create_compute_state(struct pipe_context *ctx, const struct pipe_compute_state *cso)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_compute *program = CALLOC_STRUCT(si_compute);
   if (!program)
      return NULL;

   struct si_shader_selector *sel = &program->sel;

   pipe_reference_init(&sel->base.reference, 1);
   sel->info.stage = MESA_SHADER_COMPUTE;
   sel->screen = sscreen;
   sel->const_and_shader_buf_descriptors_index =
      si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_COMPUTE);
   sel->sampler_and_images_descriptors_index =
      si_sampler_and_image_descriptors_idx(PIPE_SHADER_COMPUTE);
   sel->info.base.shared_size = cso->req_local_mem;
   program->shader.selector = &program->sel;
   program->shader.wave_size = si_determine_wave_size(sscreen, &program->shader);
   program->ir_type = cso->ir_type;
   program->private_size = cso->req_private_mem;
   program->input_size = cso->req_input_mem;

   if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
      if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
         program->ir_type = PIPE_SHADER_IR_NIR;
         sel->nir = tgsi_to_nir(cso->prog, ctx->screen, true);
      } else {
         assert(cso->ir_type == PIPE_SHADER_IR_NIR);
         sel->nir = (struct nir_shader *)cso->prog;
      }

      sel->compiler_ctx_state.debug = sctx->debug;
      sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
      p_atomic_inc(&sscreen->num_shaders_created);

      si_schedule_initial_compile(sctx, MESA_SHADER_COMPUTE, &sel->ready, &sel->compiler_ctx_state,
                                  program, si_create_compute_state_async);
   } else {
      const struct pipe_binary_program_header *header;
      header = cso->prog;

      program->shader.binary.elf_size = header->num_bytes;
      program->shader.binary.elf_buffer = malloc(header->num_bytes);
      if (!program->shader.binary.elf_buffer) {
         FREE(program);
         return NULL;
      }
      memcpy((void *)program->shader.binary.elf_buffer, header->blob, header->num_bytes);

      const amd_kernel_code_t *code_object = si_compute_get_code_object(program, 0);
      if (!code_object) {
         /* si_compute_get_code_object returns NULL for a malformed binary. */
         fprintf(stderr, "radeonsi: failed to parse the native shader binary\n");
         free((void *)program->shader.binary.elf_buffer);
         FREE(program);
         return NULL;
      }
      code_object_to_config(code_object, &program->shader.config);

      si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
      if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
         fprintf(stderr, "LLVM failed to upload shader\n");
         free((void *)program->shader.binary.elf_buffer);
         FREE(program);
         return NULL;
      }
   }

   return program;
}

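/* Bind a compute shader and update descriptor and user SGPR dirty state. */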
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = (struct si_compute *)state;
   struct si_shader_selector *sel = &program->sel;

   sctx->cs_shader_state.program = program;
   if (!program)
      return;

   /* Wait because we need active slot usage masks. */
   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      util_queue_fence_wait(&sel->ready);

   si_set_active_descriptors(sctx,
                             SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
                             sel->active_const_and_shader_buffers);
   si_set_active_descriptors(sctx, SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
                             sel->active_samplers_and_images);

   sctx->compute_shaderbuf_sgprs_dirty = true;
   sctx->compute_image_sgprs_dirty = true;

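   /* If SQTT thread tracing is active, register this pipeline with the
    * thread trace layer so RGP can correlate the dispatch with its shaders. */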
   if (unlikely((sctx->screen->debug_flags & DBG(SQTT)) && sctx->thread_trace)) {
      uint32_t pipeline_code_hash = _mesa_hash_data_with_seed(
         program->shader.binary.elf_buffer,
         program->shader.binary.elf_size,
         0);
      uint64_t base_address = program->shader.bo->gpu_address;

      struct ac_thread_trace_data *thread_trace_data = sctx->thread_trace;
      if (!si_sqtt_pipeline_is_registered(thread_trace_data, pipeline_code_hash)) {
         si_sqtt_register_pipeline(sctx, pipeline_code_hash, base_address, true);
      }

      si_sqtt_describe_pipeline_bind(sctx, pipeline_code_hash, 1);
   }
}

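/* OpenCL global buffers: each handle is patched in place with the
 * little-endian 64-bit GPU virtual address of the buffer plus the 32-bit
 * offset that was stored in the handle. */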
static void si_set_global_binding(struct pipe_context *ctx, unsigned first, unsigned n,
                                  struct pipe_resource **resources, uint32_t **handles)
{
   unsigned i;
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = sctx->cs_shader_state.program;

   if (first + n > program->max_global_buffers) {
      unsigned old_max = program->max_global_buffers;
      program->max_global_buffers = first + n;
      program->global_buffers = realloc(
         program->global_buffers, program->max_global_buffers * sizeof(program->global_buffers[0]));
      if (!program->global_buffers) {
         fprintf(stderr, "radeonsi: failed to allocate compute global_buffers\n");
         return;
      }

      memset(&program->global_buffers[old_max], 0,
             (program->max_global_buffers - old_max) * sizeof(program->global_buffers[0]));
   }

   if (!resources) {
      for (i = 0; i < n; i++) {
         pipe_resource_reference(&program->global_buffers[first + i], NULL);
      }
      return;
   }

   for (i = 0; i < n; i++) {
      uint64_t va;
      uint32_t offset;
      pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
      va = si_resource(resources[i])->gpu_address;
      offset = util_le32_to_cpu(*handles[i]);
      va += offset;
      va = util_cpu_to_le64(va);
      memcpy(handles[i], &va, sizeof(va));
   }
}

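/* Emit compute register state that only needs to be set once per command
 * stream (queue defaults, CU masks, border color pointer, etc.). */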
void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   const struct radeon_info *info = &sctx->screen->info;

   radeon_begin(cs);
   radeon_set_sh_reg(R_00B834_COMPUTE_PGM_HI,
                     S_00B834_DATA(sctx->screen->info.address32_hi >> 8));

   radeon_set_sh_reg_seq(R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
   /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
    * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
   radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
   radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));

   if (sctx->chip_class == GFX6) {
      /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
       * and is now per pipe, so it should be handled in the
       * kernel if we want to use something other than the default value.
       *
       * TODO: This should be:
       * (number of compute units) * 4 * (waves per simd) - 1
       */
      radeon_set_sh_reg(R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);

      if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
         uint64_t bc_va = sctx->border_color_buffer->gpu_address;

         radeon_set_config_reg(R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
      }
   }

   if (sctx->chip_class >= GFX7) {
      /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
      radeon_set_sh_reg_seq(R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));

      /* Disable profiling on compute queues. */
      if (cs != &sctx->gfx_cs || !sctx->screen->info.has_graphics) {
         radeon_set_sh_reg(R_00B82C_COMPUTE_PERFCOUNT_ENABLE, 0);
         radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE, 0);
      }

      /* Set the pointer to border colors. */
      /* Aldebaran doesn't support border colors. */
      if (sctx->border_color_buffer) {
         uint64_t bc_va = sctx->border_color_buffer->gpu_address;

         radeon_set_uconfig_reg_seq(R_030E00_TA_CS_BC_BASE_ADDR, 2, false);
         radeon_emit(bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */
         radeon_emit(S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
      }
   }

   /* cs_preamble_state initializes this for the gfx queue, so only do this
    * if we are on a compute queue.
    */
   if (sctx->chip_class >= GFX9 &&
       (cs != &sctx->gfx_cs || !sctx->screen->info.has_graphics)) {
      radeon_set_uconfig_reg(R_0301EC_CP_COHER_START_DELAY,
                             sctx->chip_class >= GFX10 ? 0x20 : 0);
   }

   if (!info->has_graphics && info->family >= CHIP_ARCTURUS) {
      radeon_set_sh_reg_seq(R_00B894_COMPUTE_STATIC_THREAD_MGMT_SE4, 4);
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
      radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
   }

   if (sctx->chip_class >= GFX10) {
      radeon_set_sh_reg_seq(R_00B890_COMPUTE_USER_ACCUM_0, 5);
      radeon_emit(0); /* R_00B890_COMPUTE_USER_ACCUM_0 */
      radeon_emit(0); /* R_00B894_COMPUTE_USER_ACCUM_1 */
      radeon_emit(0); /* R_00B898_COMPUTE_USER_ACCUM_2 */
      radeon_emit(0); /* R_00B89C_COMPUTE_USER_ACCUM_3 */
      radeon_emit(0); /* R_00B8A0_COMPUTE_PGM_RSRC3 */

      radeon_set_sh_reg(R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
   }
   radeon_end();
}

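/* Grow the per-context compute scratch buffer if needed and re-upload the
 * shader with the new scratch address patched in. */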
static bool si_setup_compute_scratch_buffer(struct si_context *sctx, struct si_shader *shader)
{
   uint64_t scratch_bo_size, scratch_needed;
   scratch_bo_size = 0;
   scratch_needed = sctx->max_seen_compute_scratch_bytes_per_wave * sctx->scratch_waves;
   if (sctx->compute_scratch_buffer)
      scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

   if (scratch_bo_size < scratch_needed) {
      si_resource_reference(&sctx->compute_scratch_buffer, NULL);

      sctx->compute_scratch_buffer =
         si_aligned_buffer_create(&sctx->screen->b,
                                  PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                  PIPE_USAGE_DEFAULT,
                                  scratch_needed, sctx->screen->info.pte_fragment_size);

      if (!sctx->compute_scratch_buffer)
         return false;
   }

   if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
      uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

      if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
         return false;

      si_resource_reference(&shader->scratch_bo, sctx->compute_scratch_buffer);
   }

   return true;
}

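/* Emit the shader address and PGM_RSRC1/RSRC2 state for the given compute
 * shader if it differs from the last one emitted. Sets *prefetch when the
 * caller should prefetch the shader binary into L2. */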
static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute *program,
                                     struct si_shader *shader, const amd_kernel_code_t *code_object,
                                     unsigned offset, bool *prefetch)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   struct ac_shader_config inline_config = {0};
   struct ac_shader_config *config;
   uint64_t shader_va;

   *prefetch = false;

   if (sctx->cs_shader_state.emitted_program == program && sctx->cs_shader_state.offset == offset)
      return true;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      config = &shader->config;
   } else {
      unsigned lds_blocks;

      config = &inline_config;
      code_object_to_config(code_object, config);

      lds_blocks = config->lds_size;
      /* XXX: We are over allocating LDS. For GFX6, the shader reports
       * LDS in blocks of 256 bytes, so if there are 4 bytes lds
       * allocated in the shader and 4 bytes allocated by the state
       * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
       */
      if (sctx->chip_class <= GFX6) {
         lds_blocks += align(program->sel.info.base.shared_size, 256) >> 8;
      } else {
         lds_blocks += align(program->sel.info.base.shared_size, 512) >> 9;
      }

      /* TODO: use si_multiwave_lds_size_workaround */
      assert(lds_blocks <= 0xFF);

      config->rsrc2 &= C_00B84C_LDS_SIZE;
      config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
   }

   unsigned tmpring_size;
   ac_get_scratch_tmpring_size(&sctx->screen->info, sctx->scratch_waves,
                               config->scratch_bytes_per_wave,
                               &sctx->max_seen_compute_scratch_bytes_per_wave, &tmpring_size);

   if (!si_setup_compute_scratch_buffer(sctx, shader))
      return false;

   if (shader->scratch_bo) {
      COMPUTE_DBG(sctx->screen,
                  "Waves: %u; Scratch per wave: %u bytes; "
                  "Total Scratch: %u bytes\n",
                  sctx->scratch_waves, config->scratch_bytes_per_wave,
                  config->scratch_bytes_per_wave * sctx->scratch_waves);

      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, shader->scratch_bo,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_SCRATCH_BUFFER);
   }

   shader_va = shader->bo->gpu_address + offset;
   if (program->ir_type == PIPE_SHADER_IR_NATIVE) {
      /* Shader code is placed after the amd_kernel_code_t
       * struct. */
      shader_va += sizeof(amd_kernel_code_t);
   }

   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, shader->bo,
                             RADEON_USAGE_READ | RADEON_PRIO_SHADER_BINARY);

   radeon_begin(cs);
   radeon_set_sh_reg(R_00B830_COMPUTE_PGM_LO, shader_va >> 8);

   radeon_set_sh_reg_seq(R_00B848_COMPUTE_PGM_RSRC1, 2);
   radeon_emit(config->rsrc1);
   radeon_emit(config->rsrc2);

   COMPUTE_DBG(sctx->screen,
               "COMPUTE_PGM_RSRC1: 0x%08x "
               "COMPUTE_PGM_RSRC2: 0x%08x\n",
               config->rsrc1, config->rsrc2);

   radeon_set_sh_reg(R_00B860_COMPUTE_TMPRING_SIZE, tmpring_size);
   radeon_end();

   sctx->cs_shader_state.emitted_program = program;
   sctx->cs_shader_state.offset = offset;
   sctx->cs_shader_state.uses_scratch = config->scratch_bytes_per_wave != 0;

   *prefetch = true;
   return true;
}

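/* Write the 4-dword scratch buffer resource descriptor for native kernels
 * into the COMPUTE_USER_DATA registers starting at user_sgpr. */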
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object, unsigned user_sgpr)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

   unsigned max_private_element_size =
      AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

   uint32_t scratch_dword0 = scratch_va & 0xffffffff;
   uint32_t scratch_dword1 =
      S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);

   /* Disable address clamping */
   uint32_t scratch_dword2 = 0xffffffff;
   uint32_t scratch_dword3 = S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);

   if (sctx->chip_class >= GFX9) {
      assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
   } else {
      scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

      if (sctx->chip_class < GFX8) {
         /* BUF_DATA_FORMAT is ignored, but it cannot be
          * BUF_DATA_FORMAT_INVALID. */
         scratch_dword3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
      }
   }

   radeon_begin(cs);
   radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 4);
   radeon_emit(scratch_dword0);
   radeon_emit(scratch_dword1);
   radeon_emit(scratch_dword2);
   radeon_emit(scratch_dword3);
   radeon_end();
}

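/* Set up user SGPRs according to the AMD code object v2 ABI: optional
 * private segment (scratch) descriptor, dispatch packet pointer, kernarg
 * segment pointer, and grid work-group counts. */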
static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                      const struct pipe_grid_info *info, uint64_t kernel_args_va)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;

   static const enum amd_code_property_mask_t workgroup_count_masks[] = {
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z};

   unsigned i, user_sgpr = 0;
   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
      if (code_object->workitem_private_segment_byte_size > 0) {
         setup_scratch_rsrc_user_sgprs(sctx, code_object, user_sgpr);
      }
      user_sgpr += 4;
   }

   radeon_begin(cs);

   if (AMD_HSA_BITS_GET(code_object->code_properties, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
      struct dispatch_packet dispatch;
      unsigned dispatch_offset;
      struct si_resource *dispatch_buf = NULL;
      uint64_t dispatch_va;

      /* Upload dispatch ptr */
      memset(&dispatch, 0, sizeof(dispatch));

      dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
      dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
      dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

      dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
      dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
      dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

      dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
      dispatch.group_segment_size = util_cpu_to_le32(program->sel.info.base.shared_size);

      dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

      u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch), 256, &dispatch, &dispatch_offset,
                    (struct pipe_resource **)&dispatch_buf);

      if (!dispatch_buf) {
         /* Bail out instead of dereferencing a NULL buffer below. */
         fprintf(stderr, "Error: Failed to allocate dispatch packet.\n");
         radeon_end();
         return;
      }
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, dispatch_buf,
                                RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);

      dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(dispatch_va);
      radeon_emit(S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) | S_008F04_STRIDE(0));

      si_resource_reference(&dispatch_buf, NULL);
      user_sgpr += 2;
   }

   if (AMD_HSA_BITS_GET(code_object->code_properties,
                        AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
      radeon_emit(kernel_args_va);
      radeon_emit(S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
      user_sgpr += 2;
   }

   for (i = 0; i < 3 && user_sgpr < 16; i++) {
      if (code_object->code_properties & workgroup_count_masks[i]) {
         radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 1);
         radeon_emit(info->grid[i]);
         user_sgpr += 1;
      }
   }
   radeon_end();
}

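/* Upload the kernel arguments of a native kernel through the const uploader
 * and initialize the code-object-v2 user SGPRs. */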
static bool si_upload_compute_input(struct si_context *sctx, const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_resource *input_buffer = NULL;
   uint32_t kernel_args_offset = 0;
   uint32_t *kernel_args;
   void *kernel_args_ptr;
   uint64_t kernel_args_va;

   u_upload_alloc(sctx->b.const_uploader, 0, program->input_size,
                  sctx->screen->info.tcc_cache_line_size, &kernel_args_offset,
                  (struct pipe_resource **)&input_buffer, &kernel_args_ptr);

   if (unlikely(!kernel_args_ptr))
      return false;

   kernel_args = (uint32_t *)kernel_args_ptr;
   kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

   memcpy(kernel_args, info->input, program->input_size);

   for (unsigned i = 0; i < program->input_size / 4; i++) {
      COMPUTE_DBG(sctx->screen, "input %u : %u\n", i, kernel_args[i]);
   }

   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, input_buffer,
                             RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);

   si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
   si_resource_reference(&input_buffer, NULL);
   return true;
}

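/* Write the grid size (copied from the indirect buffer if needed), the
 * packed variable block size, and cs_user_data into user SGPRs for NIR
 * shaders. */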
static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_compute *program = sctx->cs_shader_state.program;
   struct si_shader_selector *sel = &program->sel;
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 + 4 * SI_NUM_RESOURCE_SGPRS;
   unsigned block_size_reg = grid_size_reg +
                             /* 12 bytes = 3 dwords. */
                             12 * sel->info.uses_grid_size;
   unsigned cs_user_data_reg = block_size_reg + 4 * program->sel.info.uses_variable_block_size;

   radeon_begin(cs);

   if (sel->info.uses_grid_size) {
      if (info->indirect) {
         radeon_end();

         for (unsigned i = 0; i < 3; ++i) {
            si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
                            COPY_DATA_SRC_MEM, si_resource(info->indirect),
                            info->indirect_offset + 4 * i);
         }
         radeon_begin_again(cs);
      } else {
         radeon_set_sh_reg_seq(grid_size_reg, 3);
         radeon_emit(info->grid[0]);
         radeon_emit(info->grid[1]);
         radeon_emit(info->grid[2]);
      }
   }

   if (sel->info.uses_variable_block_size) {
      radeon_set_sh_reg(block_size_reg,
                        info->block[0] | (info->block[1] << 10) | (info->block[2] << 20));
   }

   if (sel->info.base.cs.user_data_components_amd) {
      radeon_set_sh_reg_seq(cs_user_data_reg, sel->info.base.cs.user_data_components_amd);
      radeon_emit_array(sctx->cs_user_data, sel->info.base.cs.user_data_components_amd);
   }
   radeon_end();
}

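/* Emit COMPUTE_RESOURCE_LIMITS, the threadgroup dimensions, and the final
 * DISPATCH_DIRECT or DISPATCH_INDIRECT packet. */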
static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_grid_info *info)
{
   struct si_screen *sscreen = sctx->screen;
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   bool render_cond_bit = sctx->render_cond_enabled;
   unsigned threads_per_threadgroup = info->block[0] * info->block[1] * info->block[2];
   unsigned waves_per_threadgroup =
      DIV_ROUND_UP(threads_per_threadgroup, sctx->cs_shader_state.program->shader.wave_size);
   unsigned threadgroups_per_cu = 1;

   if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
      threadgroups_per_cu = 2;

   if (unlikely(sctx->thread_trace_enabled)) {
      si_write_event_with_dims_marker(sctx, &sctx->gfx_cs,
                                      info->indirect ? EventCmdDispatchIndirect : EventCmdDispatch,
                                      info->grid[0], info->grid[1], info->grid[2]);
   }

   radeon_begin(cs);
   radeon_set_sh_reg(
      R_00B854_COMPUTE_RESOURCE_LIMITS,
      ac_get_compute_resource_limits(&sscreen->info, waves_per_threadgroup,
                                     sctx->cs_max_waves_per_sh, threadgroups_per_cu));

   unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_FORCE_START_AT_000(1) |
                                 /* If the KMD allows it (there is a KMD hw register for it),
                                  * allow launching waves out-of-order. (same as Vulkan) */
                                 S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
                                 S_00B800_CS_W32_EN(sctx->cs_shader_state.program->shader.wave_size == 32);

   const uint *last_block = info->last_block;
   bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

   radeon_set_sh_reg_seq(R_00B81C_COMPUTE_NUM_THREAD_X, 3);

   if (partial_block_en) {
      unsigned partial[3];

      /* If no partial_block, these should be an entire block size, not 0. */
      partial[0] = last_block[0] ? last_block[0] : info->block[0];
      partial[1] = last_block[1] ? last_block[1] : info->block[1];
      partial[2] = last_block[2] ? last_block[2] : info->block[2];

      radeon_emit(S_00B81C_NUM_THREAD_FULL(info->block[0]) |
                  S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
      radeon_emit(S_00B820_NUM_THREAD_FULL(info->block[1]) |
                  S_00B820_NUM_THREAD_PARTIAL(partial[1]));
      radeon_emit(S_00B824_NUM_THREAD_FULL(info->block[2]) |
                  S_00B824_NUM_THREAD_PARTIAL(partial[2]));

      dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
   } else {
      radeon_emit(S_00B81C_NUM_THREAD_FULL(info->block[0]));
      radeon_emit(S_00B820_NUM_THREAD_FULL(info->block[1]));
      radeon_emit(S_00B824_NUM_THREAD_FULL(info->block[2]));
   }

   if (info->indirect) {
      uint64_t base_va = si_resource(info->indirect)->gpu_address;

      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(info->indirect),
                                RADEON_USAGE_READ | RADEON_PRIO_DRAW_INDIRECT);

      radeon_emit(PKT3(PKT3_SET_BASE, 2, 0) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(1);
      radeon_emit(base_va);
      radeon_emit(base_va >> 32);

      radeon_emit(PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(info->indirect_offset);
      radeon_emit(dispatch_initiator);
   } else {
      radeon_emit(PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) | PKT3_SHADER_TYPE_S(1));
      radeon_emit(info->grid[0]);
      radeon_emit(info->grid[1]);
      radeon_emit(info->grid[2]);
      radeon_emit(dispatch_initiator);
   }

   if (unlikely(sctx->thread_trace_enabled && sctx->chip_class >= GFX9)) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0));
   }
   radeon_end();
}

static bool si_check_needs_implicit_sync(struct si_context *sctx)
{
   /* If the compute shader is going to read from a texture/image written by a
    * previous draw, we must wait for its completion before continuing.
    * Buffers and image stores (from the draw) are not taken into consideration
    * because that's the app responsibility.
    *
    * The OpenGL 4.6 spec says:
    *
    *    buffer object and texture stores performed by shaders are not
    *    automatically synchronized
    *
    * TODO: Bindless textures are not handled, and thus are not synchronized.
    */
   struct si_shader_info *info = &sctx->cs_shader_state.program->sel.info;
   struct si_samplers *samplers = &sctx->samplers[PIPE_SHADER_COMPUTE];
   unsigned mask = samplers->enabled_mask & info->base.textures_used[0];

   while (mask) {
      int i = u_bit_scan(&mask);
      struct si_sampler_view *sview = (struct si_sampler_view *)samplers->views[i];

      struct si_resource *res = si_resource(sview->base.texture);
      if (sctx->ws->cs_is_buffer_referenced(&sctx->gfx_cs, res->buf,
                                            RADEON_USAGE_NEEDS_IMPLICIT_SYNC))
         return true;
   }

   struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE];
   mask = u_bit_consecutive(0, info->base.num_images) & images->enabled_mask;

   while (mask) {
      int i = u_bit_scan(&mask);
      struct pipe_image_view *sview = &images->views[i];

      struct si_resource *res = si_resource(sview->resource);
      if (sctx->ws->cs_is_buffer_referenced(&sctx->gfx_cs, res->buf,
                                            RADEON_USAGE_NEEDS_IMPLICIT_SYNC))
         return true;
   }
   return false;
}

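/* The main compute dispatch entry point (pipe_context::launch_grid). */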
static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = sctx->screen;
   struct si_compute *program = sctx->cs_shader_state.program;
   const amd_kernel_code_t *code_object = si_compute_get_code_object(program, info->pc);
   int i;
   bool cs_regalloc_hang = sscreen->info.has_cs_regalloc_hang_bug &&
                           info->block[0] * info->block[1] * info->block[2] > 256;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE && program->shader.compilation_failed)
      return;

   if (sctx->has_graphics) {
      if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
         si_update_fb_dirtiness_after_rendering(sctx);
         sctx->last_num_draw_calls = sctx->num_draw_calls;

         if (sctx->force_cb_shader_coherent || si_check_needs_implicit_sync(sctx))
            si_make_CB_shader_coherent(sctx, 0,
                                       sctx->framebuffer.CB_has_shader_readable_metadata,
                                       sctx->framebuffer.all_DCC_pipe_aligned);
      }

      si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
   }

   /* Add buffer sizes for memory checking in need_cs_space. */
   si_context_add_resource_size(sctx, &program->shader.bo->b.b);
   /* TODO: add the scratch buffer */

   if (info->indirect) {
      si_context_add_resource_size(sctx, info->indirect);

      /* Indirect buffers use TC L2 on GFX9, but not older hw. */
      if (sctx->chip_class <= GFX8 && si_resource(info->indirect)->TC_L2_dirty) {
         sctx->flags |= SI_CONTEXT_WB_L2;
         si_resource(info->indirect)->TC_L2_dirty = false;
      }
   }

   si_need_gfx_cs_space(sctx, 0);

   /* If we're using a secure context, determine if cs must be secure or not */
   if (unlikely(radeon_uses_secure_bos(sctx->ws))) {
      bool secure = si_compute_resources_check_encrypted(sctx);
      if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                                  RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION,
                         NULL);
      }
   }

   if (sctx->bo_list_add_all_compute_resources)
      si_compute_resources_add_all_to_bo_list(sctx);

   if (!sctx->cs_shader_state.initialized) {
      si_emit_initial_compute_regs(sctx, &sctx->gfx_cs);

      sctx->cs_shader_state.emitted_program = NULL;
      sctx->cs_shader_state.initialized = true;
   }

   /* First emit registers. */
   bool prefetch;
   if (!si_switch_compute_shader(sctx, program, &program->shader, code_object, info->pc, &prefetch))
      return;

   si_upload_compute_shader_descriptors(sctx);
   si_emit_compute_shader_pointers(sctx);

   if (program->ir_type == PIPE_SHADER_IR_NATIVE &&
       unlikely(!si_upload_compute_input(sctx, code_object, info)))
      return;

   /* Global buffers */
   for (i = 0; i < program->max_global_buffers; i++) {
      struct si_resource *buffer = si_resource(program->global_buffers[i]);
      if (!buffer) {
         continue;
      }
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, buffer,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RW_BUFFER);
   }

   /* Registers that are not read from memory should be set before this: */
   if (sctx->flags)
      sctx->emit_cache_flush(sctx, &sctx->gfx_cs);

   if (sctx->has_graphics && si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
      sctx->atoms.s.render_cond.emit(sctx);
      si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
   }

   /* Prefetch the compute shader to L2. */
   if (sctx->chip_class >= GFX7 && prefetch)
      si_cp_dma_prefetch(sctx, &program->shader.bo->b.b, 0, program->shader.bo->b.b.width0);

   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      si_setup_nir_user_data(sctx, info);

   si_emit_dispatch_packets(sctx, info);

   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_compute_state(sctx, sctx->log);
   }

   /* Mark displayable DCC as dirty for bound images. */
   unsigned display_dcc_store_mask = sctx->images[PIPE_SHADER_COMPUTE].display_dcc_store_mask &
                                     BITFIELD_MASK(program->sel.info.base.num_images);
   while (display_dcc_store_mask) {
      struct si_texture *tex = (struct si_texture *)
         sctx->images[PIPE_SHADER_COMPUTE].views[u_bit_scan(&display_dcc_store_mask)].resource;

      si_mark_display_dcc_dirty(sctx, tex);
   }

   /* TODO: Bindless images don't set displayable_dcc_dirty after image stores. */

   sctx->compute_is_busy = true;
   sctx->num_compute_calls++;

   if (cs_regalloc_hang)
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

void si_destroy_compute(struct si_compute *program)
{
   struct si_shader_selector *sel = &program->sel;

   if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
      util_queue_drop_job(&sel->screen->shader_compiler_queue, &sel->ready);
      util_queue_fence_destroy(&sel->ready);
   }

   for (unsigned i = 0; i < program->max_global_buffers; i++)
      pipe_resource_reference(&program->global_buffers[i], NULL);
   FREE(program->global_buffers);

   si_shader_destroy(&program->shader);
   ralloc_free(program->sel.nir);
   FREE(program);
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_compute *program = (struct si_compute *)state;
   struct si_context *sctx = (struct si_context *)ctx;

   if (!state)
      return;

   if (program == sctx->cs_shader_state.program)
      sctx->cs_shader_state.program = NULL;

   if (program == sctx->cs_shader_state.emitted_program)
      sctx->cs_shader_state.emitted_program = NULL;

   si_compute_reference(&program, NULL);
}

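/* Intentionally a no-op: radeonsi binds compute resources through the
 * regular descriptor interfaces rather than this hook. */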
static void si_set_compute_resources(struct pipe_context *ctx_, unsigned start, unsigned count,
                                     struct pipe_surface **surfaces)
{
}

void si_init_compute_functions(struct si_context *sctx)
{
   sctx->b.create_compute_state = si_create_compute_state;
   sctx->b.delete_compute_state = si_delete_compute_state;
   sctx->b.bind_compute_state = si_bind_compute_state;
   sctx->b.set_compute_resources = si_set_compute_resources;
   sctx->b.set_global_binding = si_set_global_binding;
   sctx->b.launch_grid = si_launch_grid;
}