1 /*
2  * Copyright 2016 Red Hat Inc.
3  * Based on anv:
4  * Copyright © 2015 Intel Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23  * IN THE SOFTWARE.
24  */
25 
26 #include <assert.h>
27 #include <fcntl.h>
28 #include <stdbool.h>
29 #include <string.h>
30 
31 #include "nir/nir_builder.h"
32 #include "util/u_atomic.h"
33 #include "radv_acceleration_structure.h"
34 #include "radv_cs.h"
35 #include "radv_meta.h"
36 #include "radv_private.h"
37 #include "sid.h"
38 
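/* Timestamp and acceleration-structure size slots are filled with this value
 * on reset; a query is only considered available once the GPU has overwritten
 * it with a real result (see query_clear_value() and the waits in
 * radv_GetQueryPoolResults()/radv_CmdCopyQueryPoolResults()).
 */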
39 #define TIMESTAMP_NOT_READY UINT64_MAX
40 
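/* A pipeline-statistics query slot holds two blocks (begin/end) of 11 64-bit
 * hardware counters. pipeline_statistics_indices[] maps the bit position of a
 * VkQueryPipelineStatisticFlagBits flag to the counter's slot within a block,
 * e.g. INPUT_ASSEMBLY_VERTICES (bit 0) lives in slot 7.
 */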
41 static const int pipelinestat_block_size = 11 * 8;
42 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
43 
44 static unsigned
45 radv_get_pipeline_statistics_index(const VkQueryPipelineStatisticFlagBits flag)
46 {
47    int offset = ffs(flag) - 1;
48    assert(offset < ARRAY_SIZE(pipeline_statistics_indices));
49    return pipeline_statistics_indices[offset];
50 }
51 
52 static nir_ssa_def *
53 nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
54 {
55    return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
56 }
57 
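/* Emits "if (counter >= count) break; counter++;" into the innermost loop;
 * the meta shaders below use this to bound their loops.
 */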
58 static void
59 radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
60 {
61    nir_ssa_def *counter = nir_load_var(b, var);
62 
63    nir_push_if(b, nir_uge(b, counter, count));
64    nir_jump(b, nir_jump_break);
65    nir_pop_if(b, NULL);
66 
67    counter = nir_iadd(b, counter, nir_imm_int(b, 1));
68    nir_store_var(b, var, counter, 0x1);
69 }
70 
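/* When VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is requested, store the 32-bit
 * availability value at the given offset, zero-extended to 64 bits if
 * VK_QUERY_RESULT_64_BIT is set so it matches the size of the other result
 * elements.
 */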
71 static void
72 radv_store_availability(nir_builder *b, nir_ssa_def *flags, nir_ssa_def *dst_buf,
73                         nir_ssa_def *offset, nir_ssa_def *value32)
74 {
75    nir_push_if(b, nir_test_flag(b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
76 
77    nir_push_if(b, nir_test_flag(b, flags, VK_QUERY_RESULT_64_BIT));
78 
79    nir_store_ssbo(b, nir_vec2(b, value32, nir_imm_int(b, 0)), dst_buf, offset, .write_mask = 0x3,
80                   .align_mul = 8);
81 
82    nir_push_else(b, NULL);
83 
84    nir_store_ssbo(b, value32, dst_buf, offset, .write_mask = 0x1, .align_mul = 4);
85 
86    nir_pop_if(b, NULL);
87 
88    nir_pop_if(b, NULL);
89 }
90 
91 static nir_shader *
92 build_occlusion_query_shader(struct radv_device *device)
93 {
94    /* the shader this builds is roughly
95     *
96     * push constants {
97     * 	uint32_t flags;
98     * 	uint32_t dst_stride;
99     * };
100     *
101     * uint32_t src_stride = 16 * db_count;
102     *
103     * location(binding = 0) buffer dst_buf;
104     * location(binding = 1) buffer src_buf;
105     *
106     * void main() {
107     * 	uint64_t result = 0;
108     * 	uint64_t src_offset = src_stride * global_id.x;
109     * 	uint64_t dst_offset = dst_stride * global_id.x;
110     * 	bool available = true;
111     * 	for (int i = 0; i < db_count; ++i) {
112     *		if (enabled_rb_mask & (1 << i)) {
113     *			uint64_t start = src_buf[src_offset + 16 * i];
114     *			uint64_t end = src_buf[src_offset + 16 * i + 8];
115     *			if ((start & (1ull << 63)) && (end & (1ull << 63)))
116     *				result += end - start;
117     *			else
118     *				available = false;
119     *		}
120     * 	}
121     * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
122     * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
123     * 		if (flags & VK_QUERY_RESULT_64_BIT)
124     * 			dst_buf[dst_offset] = result;
125     * 		else
126     * 			dst_buf[dst_offset] = (uint32_t)result;
127     * 	}
128     * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
129     * 		dst_buf[dst_offset + elem_size] = available;
130     * 	}
131     * }
132     */
133    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "occlusion_query");
134    b.shader->info.workgroup_size[0] = 64;
135    b.shader->info.workgroup_size[1] = 1;
136    b.shader->info.workgroup_size[2] = 1;
137 
138    nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
139    nir_variable *outer_counter =
140       nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
141    nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
142    nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
143    nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
144    unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
145    unsigned db_count = device->physical_device->rad_info.max_render_backends;
146 
147    nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
148 
149    nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
150    nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
151 
152    nir_ssa_def *global_id = get_global_ids(&b, 1);
153 
154    nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
155    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
156    nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
157    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
158 
159    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
160    nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
161    nir_store_var(&b, available, nir_imm_true(&b), 0x1);
162 
163    nir_push_loop(&b);
164 
165    nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
166    radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
167 
168    nir_ssa_def *enabled_cond = nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
169                                         nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));
170 
171    nir_push_if(&b, nir_i2b(&b, enabled_cond));
172 
173    nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
174    load_offset = nir_iadd(&b, input_base, load_offset);
175 
176    nir_ssa_def *load = nir_load_ssbo(&b, 2, 64, src_buf, load_offset, .align_mul = 16);
177 
178    nir_store_var(&b, start, nir_channel(&b, load, 0), 0x1);
179    nir_store_var(&b, end, nir_channel(&b, load, 1), 0x1);
180 
181    nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
182    nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
183 
184    nir_push_if(&b, nir_iand(&b, start_done, end_done));
185 
186    nir_store_var(&b, result,
187                  nir_iadd(&b, nir_load_var(&b, result),
188                           nir_isub(&b, nir_load_var(&b, end), nir_load_var(&b, start))),
189                  0x1);
190 
191    nir_push_else(&b, NULL);
192 
193    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
194 
195    nir_pop_if(&b, NULL);
196    nir_pop_if(&b, NULL);
197    nir_pop_loop(&b, NULL);
198 
199    /* Store the result if complete or if partial results have been requested. */
200 
201    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
202    nir_ssa_def *result_size =
203       nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
204    nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
205                            nir_load_var(&b, available)));
206 
207    nir_push_if(&b, result_is_64bit);
208 
209    nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
210                   .align_mul = 8);
211 
212    nir_push_else(&b, NULL);
213 
214    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
215                   .write_mask = 0x1, .align_mul = 8);
216 
217    nir_pop_if(&b, NULL);
218    nir_pop_if(&b, NULL);
219 
220    radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
221                            nir_b2i32(&b, nir_load_var(&b, available)));
222 
223    return b.shader;
224 }
225 
226 static nir_shader *
227 build_pipeline_statistics_query_shader(struct radv_device *device)
228 {
229    /* the shader this builds is roughly
230     *
231     * push constants {
232     * 	uint32_t flags;
233     * 	uint32_t dst_stride;
234     * 	uint32_t stats_mask;
235     * 	uint32_t avail_offset;
236     * };
237     *
238     * uint32_t src_stride = pipelinestat_block_size * 2;
239     *
240     * location(binding = 0) buffer dst_buf;
241     * location(binding = 1) buffer src_buf;
242     *
243     * void main() {
244     * 	uint64_t src_offset = src_stride * global_id.x;
245     * 	uint64_t dst_base = dst_stride * global_id.x;
246     * 	uint64_t dst_offset = dst_base;
247     * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
248     * 	uint32_t elem_count = stats_mask >> 16;
249     * 	uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
250     * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
251     * 		dst_buf[dst_offset + elem_count * elem_size] = available32;
252     * 	}
253     * 	if ((bool)available32) {
254     * 		// repeat 11 times:
255     * 		if (stats_mask & (1 << 0)) {
256     * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
257     * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
258     * 			uint64_t result = end - start; if (flags & VK_QUERY_RESULT_64_BIT)
259     * 				dst_buf[dst_offset] = result;
260     * 			else
261     * 				dst_buf[dst_offset] = (uint32_t)result;
262     * 			dst_offset += elem_size;
263     * 		}
264     * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
265     *              // Set everything to 0 as we don't know what is valid.
266     * 		for (int i = 0; i < elem_count; ++i)
267     * 			dst_buf[dst_base + elem_size * i] = 0;
268     * 	}
269     * }
270     */
271    nir_builder b =
272       nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "pipeline_statistics_query");
273    b.shader->info.workgroup_size[0] = 64;
274    b.shader->info.workgroup_size[1] = 1;
275    b.shader->info.workgroup_size[2] = 1;
276 
277    nir_variable *output_offset =
278       nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
279 
280    nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
281    nir_ssa_def *stats_mask = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 16);
282    nir_ssa_def *avail_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
283 
284    nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
285    nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
286 
287    nir_ssa_def *global_id = get_global_ids(&b, 1);
288 
289    nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
290    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
291    nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
292    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
293 
294    avail_offset = nir_iadd(&b, avail_offset, nir_imul(&b, global_id, nir_imm_int(&b, 4)));
295 
296    nir_ssa_def *available32 = nir_load_ssbo(&b, 1, 32, src_buf, avail_offset, .align_mul = 4);
297 
298    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
299    nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
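   /* The host encodes util_bitcount(stats_mask) into the upper 16 bits of the
    * mask (see radv_query_shader()), so the element count is just a shift.
    */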
300    nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
301 
302    radv_store_availability(&b, flags, dst_buf,
303                            nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)),
304                            available32);
305 
306    nir_push_if(&b, nir_i2b(&b, available32));
307 
308    nir_store_var(&b, output_offset, output_base, 0x1);
309    for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
310       nir_push_if(&b, nir_test_flag(&b, stats_mask, 1u << i));
311 
312       nir_ssa_def *start_offset =
313          nir_iadd(&b, input_base, nir_imm_int(&b, pipeline_statistics_indices[i] * 8));
314       nir_ssa_def *start = nir_load_ssbo(&b, 1, 64, src_buf, start_offset, .align_mul = 8);
315 
316       nir_ssa_def *end_offset =
317          nir_iadd(&b, input_base,
318                   nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size));
319       nir_ssa_def *end = nir_load_ssbo(&b, 1, 64, src_buf, end_offset, .align_mul = 8);
320 
321       nir_ssa_def *result = nir_isub(&b, end, start);
322 
323       /* Store result */
324       nir_push_if(&b, result_is_64bit);
325 
326       nir_store_ssbo(&b, result, dst_buf, nir_load_var(&b, output_offset), .write_mask = 0x1,
327                      .align_mul = 8);
328 
329       nir_push_else(&b, NULL);
330 
331       nir_store_ssbo(&b, nir_u2u32(&b, result), dst_buf, nir_load_var(&b, output_offset),
332                      .write_mask = 0x1, .align_mul = 4);
333 
334       nir_pop_if(&b, NULL);
335 
336       nir_store_var(&b, output_offset, nir_iadd(&b, nir_load_var(&b, output_offset), elem_size),
337                     0x1);
338 
339       nir_pop_if(&b, NULL);
340    }
341 
342    nir_push_else(&b, NULL); /* nir_i2b(&b, available32) */
343 
344    nir_push_if(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));
345 
346    /* Stores zeros in all outputs. */
347 
348    nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
349    nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);
350 
351    nir_loop *loop = nir_push_loop(&b);
352 
353    nir_ssa_def *current_counter = nir_load_var(&b, counter);
354    radv_break_on_count(&b, counter, elem_count);
355 
356    nir_ssa_def *output_elem = nir_iadd(&b, output_base, nir_imul(&b, elem_size, current_counter));
357    nir_push_if(&b, result_is_64bit);
358 
359    nir_store_ssbo(&b, nir_imm_int64(&b, 0), dst_buf, output_elem, .write_mask = 0x1,
360                   .align_mul = 8);
361 
362    nir_push_else(&b, NULL);
363 
364    nir_store_ssbo(&b, nir_imm_int(&b, 0), dst_buf, output_elem, .write_mask = 0x1, .align_mul = 4);
365 
366    nir_pop_if(&b, NULL);
367 
368    nir_pop_loop(&b, loop);
369    nir_pop_if(&b, NULL); /* VK_QUERY_RESULT_PARTIAL_BIT */
370    nir_pop_if(&b, NULL); /* nir_i2b(&b, available32) */
371    return b.shader;
372 }
373 
374 static nir_shader *
375 build_tfb_query_shader(struct radv_device *device)
376 {
377    /* the shader this builds is roughly
378     *
379     * uint32_t src_stride = 32;
380     *
381     * location(binding = 0) buffer dst_buf;
382     * location(binding = 1) buffer src_buf;
383     *
384     * void main() {
385     *	uint64_t result[2] = {};
386     *	bool available = false;
387     *	uint64_t src_offset = src_stride * global_id.x;
388     * 	uint64_t dst_offset = dst_stride * global_id.x;
389     * 	uint64_t *src_data = src_buf[src_offset];
390     *	uint32_t avail = (src_data[0] >> 32) &
391     *			 (src_data[1] >> 32) &
392     *			 (src_data[2] >> 32) &
393     *			 (src_data[3] >> 32);
394     *	if (avail & 0x80000000) {
395     *		result[0] = src_data[3] - src_data[1];
396     *		result[1] = src_data[2] - src_data[0];
397     *		available = true;
398     *	}
399     * 	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
400     * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
401     *		if (flags & VK_QUERY_RESULT_64_BIT) {
402     *			dst_buf[dst_offset] = result;
403     *		} else {
404     *			dst_buf[dst_offset] = (uint32_t)result;
405     *		}
406     *	}
407     *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
408     *		dst_buf[dst_offset + result_size] = available;
409     * 	}
410     * }
411     */
412    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "tfb_query");
413    b.shader->info.workgroup_size[0] = 64;
414    b.shader->info.workgroup_size[1] = 1;
415    b.shader->info.workgroup_size[2] = 1;
416 
417    /* Create and initialize local variables. */
418    nir_variable *result =
419       nir_local_variable_create(b.impl, glsl_vector_type(GLSL_TYPE_UINT64, 2), "result");
420    nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
421 
422    nir_store_var(&b, result, nir_vec2(&b, nir_imm_int64(&b, 0), nir_imm_int64(&b, 0)), 0x3);
423    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
424 
425    nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
426 
427    /* Load resources. */
428    nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
429    nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
430 
431    /* Compute global ID. */
432    nir_ssa_def *global_id = get_global_ids(&b, 1);
433 
434    /* Compute src/dst strides. */
435    nir_ssa_def *input_stride = nir_imm_int(&b, 32);
436    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
437    nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
438    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
439 
440    /* Load data from the query pool. */
441    nir_ssa_def *load1 = nir_load_ssbo(&b, 4, 32, src_buf, input_base, .align_mul = 32);
442    nir_ssa_def *load2 = nir_load_ssbo(
443       &b, 4, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 16)), .align_mul = 16);
444 
445    /* Check if result is available. */
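   /* The hardware sets the top bit of each counter's upper dword once the
    * value has been written (the same bit the WAIT_BIT path polls in
    * radv_CmdCopyQueryPoolResults), so AND-ing the upper dwords together and
    * testing bit 31 checks that all four streamout counters are final.
    */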
446    nir_ssa_def *avails[2];
447    avails[0] = nir_iand(&b, nir_channel(&b, load1, 1), nir_channel(&b, load1, 3));
448    avails[1] = nir_iand(&b, nir_channel(&b, load2, 1), nir_channel(&b, load2, 3));
449    nir_ssa_def *result_is_available =
450       nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]), nir_imm_int(&b, 0x80000000)));
451 
452    /* Only compute result if available. */
453    nir_push_if(&b, result_is_available);
454 
455    /* Pack values. */
456    nir_ssa_def *packed64[4];
457    packed64[0] =
458       nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load1, 0), nir_channel(&b, load1, 1)));
459    packed64[1] =
460       nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load1, 2), nir_channel(&b, load1, 3)));
461    packed64[2] =
462       nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load2, 0), nir_channel(&b, load2, 1)));
463    packed64[3] =
464       nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load2, 2), nir_channel(&b, load2, 3)));
465 
466    /* Compute result. */
467    nir_ssa_def *num_primitive_written = nir_isub(&b, packed64[3], packed64[1]);
468    nir_ssa_def *primitive_storage_needed = nir_isub(&b, packed64[2], packed64[0]);
469 
470    nir_store_var(&b, result, nir_vec2(&b, num_primitive_written, primitive_storage_needed), 0x3);
471    nir_store_var(&b, available, nir_imm_true(&b), 0x1);
472 
473    nir_pop_if(&b, NULL);
474 
475    /* Determine if result is 64 or 32 bit. */
476    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
477    nir_ssa_def *result_size =
478       nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16), nir_imm_int(&b, 8));
479 
480    /* Store the result if complete or partial results have been requested. */
481    nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
482                            nir_load_var(&b, available)));
483 
484    /* Store result. */
485    nir_push_if(&b, result_is_64bit);
486 
487    nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x3,
488                   .align_mul = 8);
489 
490    nir_push_else(&b, NULL);
491 
492    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
493                   .write_mask = 0x3, .align_mul = 4);
494 
495    nir_pop_if(&b, NULL);
496    nir_pop_if(&b, NULL);
497 
498    radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
499                            nir_b2i32(&b, nir_load_var(&b, available)));
500 
501    return b.shader;
502 }
503 
504 static nir_shader *
505 build_timestamp_query_shader(struct radv_device *device)
506 {
507    /* the shader this builds is roughly
508     *
509     * uint32_t src_stride = 8;
510     *
511     * location(binding = 0) buffer dst_buf;
512     * location(binding = 1) buffer src_buf;
513     *
514     * void main() {
515     *	uint64_t result = 0;
516     *	bool available = false;
517     *	uint64_t src_offset = src_stride * global_id.x;
518     * 	uint64_t dst_offset = dst_stride * global_id.x;
519     * 	uint64_t timestamp = src_buf[src_offset];
520     *	if (timestamp != TIMESTAMP_NOT_READY) {
521     *		result = timestamp;
522     *		available = true;
523     *	}
524     * 	uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
525     * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
526     *		if (flags & VK_QUERY_RESULT_64_BIT) {
527     *			dst_buf[dst_offset] = result;
528     *		} else {
529     *			dst_buf[dst_offset] = (uint32_t)result;
530     *		}
531     *	}
532     *	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
533     *		dst_buf[dst_offset + result_size] = available;
534     * 	}
535     * }
536     */
537    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "timestamp_query");
538    b.shader->info.workgroup_size[0] = 64;
539    b.shader->info.workgroup_size[1] = 1;
540    b.shader->info.workgroup_size[2] = 1;
541 
542    /* Create and initialize local variables. */
543    nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
544    nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
545 
546    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
547    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
548 
549    nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
550 
551    /* Load resources. */
552    nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
553    nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
554 
555    /* Compute global ID. */
556    nir_ssa_def *global_id = get_global_ids(&b, 1);
557 
558    /* Compute src/dst strides. */
559    nir_ssa_def *input_stride = nir_imm_int(&b, 8);
560    nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
561    nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
562    nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
563 
564    /* Load data from the query pool. */
565    nir_ssa_def *load = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 8);
566 
567    /* Pack the timestamp. */
568    nir_ssa_def *timestamp;
569    timestamp =
570       nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load, 0), nir_channel(&b, load, 1)));
571 
572    /* Check if result is available. */
573    nir_ssa_def *result_is_available =
574       nir_i2b(&b, nir_ine(&b, timestamp, nir_imm_int64(&b, TIMESTAMP_NOT_READY)));
575 
576    /* Only store result if available. */
577    nir_push_if(&b, result_is_available);
578 
579    nir_store_var(&b, result, timestamp, 0x1);
580    nir_store_var(&b, available, nir_imm_true(&b), 0x1);
581 
582    nir_pop_if(&b, NULL);
583 
584    /* Determine if result is 64 or 32 bit. */
585    nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
586    nir_ssa_def *result_size =
587       nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
588 
589    /* Store the result if complete or partial results have been requested. */
590    nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
591                            nir_load_var(&b, available)));
592 
593    /* Store result. */
594    nir_push_if(&b, result_is_64bit);
595 
596    nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
597                   .align_mul = 8);
598 
599    nir_push_else(&b, NULL);
600 
601    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
602                   .write_mask = 0x1, .align_mul = 4);
603 
604    nir_pop_if(&b, NULL);
605 
606    nir_pop_if(&b, NULL);
607 
608    radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
609                            nir_b2i32(&b, nir_load_var(&b, available)));
610 
611    return b.shader;
612 }
613 
614 static VkResult
615 radv_device_init_meta_query_state_internal(struct radv_device *device)
616 {
617    VkResult result;
618    nir_shader *occlusion_cs = NULL;
619    nir_shader *pipeline_statistics_cs = NULL;
620    nir_shader *tfb_cs = NULL;
621    nir_shader *timestamp_cs = NULL;
622 
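   /* The query meta pipelines can be created lazily; take the meta mutex and
    * return early if another thread already built them.
    */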
623    mtx_lock(&device->meta_state.mtx);
624    if (device->meta_state.query.pipeline_statistics_query_pipeline) {
625       mtx_unlock(&device->meta_state.mtx);
626       return VK_SUCCESS;
627    }
628    occlusion_cs = build_occlusion_query_shader(device);
629    pipeline_statistics_cs = build_pipeline_statistics_query_shader(device);
630    tfb_cs = build_tfb_query_shader(device);
631    timestamp_cs = build_timestamp_query_shader(device);
632 
633    VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
634       .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
635       .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
636       .bindingCount = 2,
637       .pBindings = (VkDescriptorSetLayoutBinding[]){
638          {.binding = 0,
639           .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
640           .descriptorCount = 1,
641           .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
642           .pImmutableSamplers = NULL},
643          {.binding = 1,
644           .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
645           .descriptorCount = 1,
646           .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
647           .pImmutableSamplers = NULL},
648       }};
649 
650    result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &occlusion_ds_create_info,
651                                            &device->meta_state.alloc,
652                                            &device->meta_state.query.ds_layout);
653    if (result != VK_SUCCESS)
654       goto fail;
655 
656    VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
657       .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
658       .setLayoutCount = 1,
659       .pSetLayouts = &device->meta_state.query.ds_layout,
660       .pushConstantRangeCount = 1,
661       .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
662    };
663 
664    result =
665       radv_CreatePipelineLayout(radv_device_to_handle(device), &occlusion_pl_create_info,
666                                 &device->meta_state.alloc, &device->meta_state.query.p_layout);
667    if (result != VK_SUCCESS)
668       goto fail;
669 
670    VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
671       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
672       .stage = VK_SHADER_STAGE_COMPUTE_BIT,
673       .module = vk_shader_module_handle_from_nir(occlusion_cs),
674       .pName = "main",
675       .pSpecializationInfo = NULL,
676    };
677 
678    VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
679       .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
680       .stage = occlusion_pipeline_shader_stage,
681       .flags = 0,
682       .layout = device->meta_state.query.p_layout,
683    };
684 
685    result = radv_CreateComputePipelines(
686       radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
687       &occlusion_vk_pipeline_info, NULL, &device->meta_state.query.occlusion_query_pipeline);
688    if (result != VK_SUCCESS)
689       goto fail;
690 
691    VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
692       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
693       .stage = VK_SHADER_STAGE_COMPUTE_BIT,
694       .module = vk_shader_module_handle_from_nir(pipeline_statistics_cs),
695       .pName = "main",
696       .pSpecializationInfo = NULL,
697    };
698 
699    VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
700       .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
701       .stage = pipeline_statistics_pipeline_shader_stage,
702       .flags = 0,
703       .layout = device->meta_state.query.p_layout,
704    };
705 
706    result = radv_CreateComputePipelines(
707       radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
708       &pipeline_statistics_vk_pipeline_info, NULL,
709       &device->meta_state.query.pipeline_statistics_query_pipeline);
710    if (result != VK_SUCCESS)
711       goto fail;
712 
713    VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
714       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
715       .stage = VK_SHADER_STAGE_COMPUTE_BIT,
716       .module = vk_shader_module_handle_from_nir(tfb_cs),
717       .pName = "main",
718       .pSpecializationInfo = NULL,
719    };
720 
721    VkComputePipelineCreateInfo tfb_pipeline_info = {
722       .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
723       .stage = tfb_pipeline_shader_stage,
724       .flags = 0,
725       .layout = device->meta_state.query.p_layout,
726    };
727 
728    result = radv_CreateComputePipelines(
729       radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
730       &tfb_pipeline_info, NULL, &device->meta_state.query.tfb_query_pipeline);
731    if (result != VK_SUCCESS)
732       goto fail;
733 
734    VkPipelineShaderStageCreateInfo timestamp_pipeline_shader_stage = {
735       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
736       .stage = VK_SHADER_STAGE_COMPUTE_BIT,
737       .module = vk_shader_module_handle_from_nir(timestamp_cs),
738       .pName = "main",
739       .pSpecializationInfo = NULL,
740    };
741 
742    VkComputePipelineCreateInfo timestamp_pipeline_info = {
743       .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
744       .stage = timestamp_pipeline_shader_stage,
745       .flags = 0,
746       .layout = device->meta_state.query.p_layout,
747    };
748 
749    result = radv_CreateComputePipelines(
750       radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
751       &timestamp_pipeline_info, NULL, &device->meta_state.query.timestamp_query_pipeline);
752 
753 fail:
754    if (result != VK_SUCCESS)
755       radv_device_finish_meta_query_state(device);
756    ralloc_free(occlusion_cs);
757    ralloc_free(pipeline_statistics_cs);
758    ralloc_free(tfb_cs);
759    ralloc_free(timestamp_cs);
760    mtx_unlock(&device->meta_state.mtx);
761    return result;
762 }
763 
764 VkResult
765 radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
766 {
767    if (on_demand)
768       return VK_SUCCESS;
769 
770    return radv_device_init_meta_query_state_internal(device);
771 }
772 
773 void
774 radv_device_finish_meta_query_state(struct radv_device *device)
775 {
776    if (device->meta_state.query.tfb_query_pipeline)
777       radv_DestroyPipeline(radv_device_to_handle(device),
778                            device->meta_state.query.tfb_query_pipeline, &device->meta_state.alloc);
779 
780    if (device->meta_state.query.pipeline_statistics_query_pipeline)
781       radv_DestroyPipeline(radv_device_to_handle(device),
782                            device->meta_state.query.pipeline_statistics_query_pipeline,
783                            &device->meta_state.alloc);
784 
785    if (device->meta_state.query.occlusion_query_pipeline)
786       radv_DestroyPipeline(radv_device_to_handle(device),
787                            device->meta_state.query.occlusion_query_pipeline,
788                            &device->meta_state.alloc);
789 
790    if (device->meta_state.query.timestamp_query_pipeline)
791       radv_DestroyPipeline(radv_device_to_handle(device),
792                            device->meta_state.query.timestamp_query_pipeline,
793                            &device->meta_state.alloc);
794 
795    if (device->meta_state.query.p_layout)
796       radv_DestroyPipelineLayout(radv_device_to_handle(device), device->meta_state.query.p_layout,
797                                  &device->meta_state.alloc);
798 
799    if (device->meta_state.query.ds_layout)
800       radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
801                                       device->meta_state.query.ds_layout,
802                                       &device->meta_state.alloc);
803 }
804 
805 static void
806 radv_query_shader(struct radv_cmd_buffer *cmd_buffer, VkPipeline *pipeline,
807                   struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo,
808                   uint64_t src_offset, uint64_t dst_offset, uint32_t src_stride,
809                   uint32_t dst_stride, size_t dst_size, uint32_t count, uint32_t flags,
810                   uint32_t pipeline_stats_mask, uint32_t avail_offset)
811 {
812    struct radv_device *device = cmd_buffer->device;
813    struct radv_meta_saved_state saved_state;
814    struct radv_buffer src_buffer, dst_buffer;
815    bool old_predicating;
816 
817    if (!*pipeline) {
818       VkResult ret = radv_device_init_meta_query_state_internal(device);
819       if (ret != VK_SUCCESS) {
820          cmd_buffer->record_result = ret;
821          return;
822       }
823    }
824 
825    radv_meta_save(
826       &saved_state, cmd_buffer,
827       RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);
828 
829    /* VK_EXT_conditional_rendering says that copy commands should not be
830     * affected by conditional rendering.
831     */
832    old_predicating = cmd_buffer->state.predicating;
833    cmd_buffer->state.predicating = false;
834 
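   /* The source range has to cover both the per-query data and, for pipeline
    * statistics, the availability dwords; the destination only needs a full
    * stride for every element but the last.
    */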
835    uint64_t src_buffer_size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset);
836    uint64_t dst_buffer_size = dst_stride * (count - 1) + dst_size;
837 
838    radv_buffer_init(&src_buffer, device, src_bo, src_buffer_size, src_offset);
839    radv_buffer_init(&dst_buffer, device, dst_bo, dst_buffer_size, dst_offset);
840 
841    radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
842                         *pipeline);
843 
844    radv_meta_push_descriptor_set(
845       cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.query.p_layout, 0, /* set */
846       2, /* descriptorWriteCount */
847       (VkWriteDescriptorSet[]){
848          {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
849           .dstBinding = 0,
850           .dstArrayElement = 0,
851           .descriptorCount = 1,
852           .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
853           .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
854                                                    .offset = 0,
855                                                    .range = VK_WHOLE_SIZE}},
856          {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
857           .dstBinding = 1,
858           .dstArrayElement = 0,
859           .descriptorCount = 1,
860           .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
861           .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&src_buffer),
862                                                    .offset = 0,
863                                                    .range = VK_WHOLE_SIZE}}});
864 
865    /* Encode the number of elements for easy access by the shader. */
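   /* e.g. a mask with 3 statistics enabled becomes (3 << 16) | mask, and the
    * shader reads the element count back with "stats_mask >> 16".
    */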
866    pipeline_stats_mask &= 0x7ff;
867    pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
868 
869    avail_offset -= src_offset;
870 
871    struct {
872       uint32_t flags;
873       uint32_t dst_stride;
874       uint32_t pipeline_stats_mask;
875       uint32_t avail_offset;
876    } push_constants = {flags, dst_stride, pipeline_stats_mask, avail_offset};
877 
878    radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.query.p_layout,
879                          VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), &push_constants);
880 
881    cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_INV_VCACHE;
882 
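   /* If the caller asked to wait for results, also flush the framebuffer
    * caches so that in-flight DB/CB writes (e.g. occlusion counts) have
    * landed in memory before the shader reads them.
    */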
883    if (flags & VK_QUERY_RESULT_WAIT_BIT)
884       cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
885 
886    radv_unaligned_dispatch(cmd_buffer, count, 1, 1);
887 
888    /* Restore conditional rendering. */
889    cmd_buffer->state.predicating = old_predicating;
890 
891    radv_buffer_finish(&src_buffer);
892    radv_buffer_finish(&dst_buffer);
893 
894    radv_meta_restore(&saved_state, cmd_buffer);
895 }
896 
897 static bool
898 radv_query_pool_needs_gds(struct radv_device *device, struct radv_query_pool *pool)
899 {
900    /* The number of primitives generated by geometry shader invocations is
901     * only counted by the hardware if GS uses the legacy path. When NGG GS
902     * is used, the hardware can't know the number of generated primitives
903     * and we have to count it manually inside the shader. To achieve that, the
904     * driver does a plain GDS atomic to accumulate that value.
905     * TODO: fix use of NGG GS and non-NGG GS inside the same begin/end
906     * query.
907     */
908    return device->physical_device->use_ngg &&
909           (pool->pipeline_stats_mask & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);
910 }
911 
912 static void
913 radv_destroy_query_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
914                         struct radv_query_pool *pool)
915 {
916    if (pool->bo)
917       device->ws->buffer_destroy(device->ws, pool->bo);
918    vk_object_base_finish(&pool->base);
919    vk_free2(&device->vk.alloc, pAllocator, pool);
920 }
921 
922 VkResult
923 radv_CreateQueryPool(VkDevice _device, const VkQueryPoolCreateInfo *pCreateInfo,
924                      const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool)
925 {
926    RADV_FROM_HANDLE(radv_device, device, _device);
927    struct radv_query_pool *pool =
928       vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
929 
930    if (!pool)
931       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
932 
933    vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_QUERY_POOL);
934 
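   /* Per-query slot layout: occlusion stores a begin/end 64-bit pair per
    * render backend, pipeline statistics stores two blocks of 11 counters
    * (begin and end), timestamps and acceleration-structure sizes are a
    * single 64-bit value, and transform feedback stores four 64-bit
    * streamout counters.
    */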
935    switch (pCreateInfo->queryType) {
936    case VK_QUERY_TYPE_OCCLUSION:
937       pool->stride = 16 * device->physical_device->rad_info.max_render_backends;
938       break;
939    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
940       pool->stride = pipelinestat_block_size * 2;
941       break;
942    case VK_QUERY_TYPE_TIMESTAMP:
943    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
944    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
945       pool->stride = 8;
946       break;
947    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
948       pool->stride = 32;
949       break;
950    default:
951       unreachable("creating unhandled query type");
952    }
953 
954    pool->type = pCreateInfo->queryType;
955    pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
956    pool->availability_offset = pool->stride * pCreateInfo->queryCount;
957    pool->size = pool->availability_offset;
958    if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
959       pool->size += 4 * pCreateInfo->queryCount;
960 
961    VkResult result = device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
962                                                RADEON_FLAG_NO_INTERPROCESS_SHARING,
963                                                RADV_BO_PRIORITY_QUERY_POOL, 0, &pool->bo);
964    if (result != VK_SUCCESS) {
965       radv_destroy_query_pool(device, pAllocator, pool);
966       return vk_error(device, result);
967    }
968 
969    pool->ptr = device->ws->buffer_map(pool->bo);
970    if (!pool->ptr) {
971       radv_destroy_query_pool(device, pAllocator, pool);
972       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
973    }
974 
975    *pQueryPool = radv_query_pool_to_handle(pool);
976    return VK_SUCCESS;
977 }
978 
979 void
980 radv_DestroyQueryPool(VkDevice _device, VkQueryPool _pool, const VkAllocationCallbacks *pAllocator)
981 {
982    RADV_FROM_HANDLE(radv_device, device, _device);
983    RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
984 
985    if (!pool)
986       return;
987 
988    radv_destroy_query_pool(device, pAllocator, pool);
989 }
990 
991 VkResult
992 radv_GetQueryPoolResults(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery,
993                          uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
994                          VkQueryResultFlags flags)
995 {
996    RADV_FROM_HANDLE(radv_device, device, _device);
997    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
998    char *data = pData;
999    VkResult result = VK_SUCCESS;
1000 
1001    if (radv_device_is_lost(device))
1002       return VK_ERROR_DEVICE_LOST;
1003 
1004    for (unsigned query_idx = 0; query_idx < queryCount; ++query_idx, data += stride) {
1005       char *dest = data;
1006       unsigned query = firstQuery + query_idx;
1007       char *src = pool->ptr + query * pool->stride;
1008       uint32_t available;
1009 
1010       switch (pool->type) {
1011       case VK_QUERY_TYPE_TIMESTAMP:
1012       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1013       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: {
1014          uint64_t const *src64 = (uint64_t const *)src;
1015          uint64_t value;
1016 
1017          do {
1018             value = p_atomic_read(src64);
1019          } while (value == TIMESTAMP_NOT_READY && (flags & VK_QUERY_RESULT_WAIT_BIT));
1020 
1021          available = value != TIMESTAMP_NOT_READY;
1022 
1023          if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1024             result = VK_NOT_READY;
1025 
1026          if (flags & VK_QUERY_RESULT_64_BIT) {
1027             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1028                *(uint64_t *)dest = value;
1029             dest += 8;
1030          } else {
1031             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1032                *(uint32_t *)dest = (uint32_t)value;
1033             dest += 4;
1034          }
1035          break;
1036       }
1037       case VK_QUERY_TYPE_OCCLUSION: {
1038          uint64_t const *src64 = (uint64_t const *)src;
1039          uint32_t db_count = device->physical_device->rad_info.max_render_backends;
1040          uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
1041          uint64_t sample_count = 0;
1042          available = 1;
1043 
1044          for (int i = 0; i < db_count; ++i) {
1045             uint64_t start, end;
1046 
1047             if (!(enabled_rb_mask & (1 << i)))
1048                continue;
1049 
1050             do {
1051                start = p_atomic_read(src64 + 2 * i);
1052                end = p_atomic_read(src64 + 2 * i + 1);
1053             } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) &&
1054                      (flags & VK_QUERY_RESULT_WAIT_BIT));
1055 
1056             if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
1057                available = 0;
1058             else {
1059                sample_count += end - start;
1060             }
1061          }
1062 
1063          if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1064             result = VK_NOT_READY;
1065 
1066          if (flags & VK_QUERY_RESULT_64_BIT) {
1067             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1068                *(uint64_t *)dest = sample_count;
1069             dest += 8;
1070          } else {
1071             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1072                *(uint32_t *)dest = sample_count;
1073             dest += 4;
1074          }
1075          break;
1076       }
1077       case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1078          const uint32_t *avail_ptr =
1079             (const uint32_t *)(pool->ptr + pool->availability_offset + 4 * query);
1080 
1081          do {
1082             available = p_atomic_read(avail_ptr);
1083          } while (!available && (flags & VK_QUERY_RESULT_WAIT_BIT));
1084 
1085          if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1086             result = VK_NOT_READY;
1087 
1088          const uint64_t *start = (uint64_t *)src;
1089          const uint64_t *stop = (uint64_t *)(src + pipelinestat_block_size);
1090          if (flags & VK_QUERY_RESULT_64_BIT) {
1091             uint64_t *dst = (uint64_t *)dest;
1092             dest += util_bitcount(pool->pipeline_stats_mask) * 8;
1093             for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
1094                if (pool->pipeline_stats_mask & (1u << i)) {
1095                   if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1096                      *dst = stop[pipeline_statistics_indices[i]] -
1097                             start[pipeline_statistics_indices[i]];
1098                   dst++;
1099                }
1100             }
1101 
1102          } else {
1103             uint32_t *dst = (uint32_t *)dest;
1104             dest += util_bitcount(pool->pipeline_stats_mask) * 4;
1105             for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
1106                if (pool->pipeline_stats_mask & (1u << i)) {
1107                   if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1108                      *dst = stop[pipeline_statistics_indices[i]] -
1109                             start[pipeline_statistics_indices[i]];
1110                   dst++;
1111                }
1112             }
1113          }
1114          break;
1115       }
1116       case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
1117          uint64_t const *src64 = (uint64_t const *)src;
1118          uint64_t num_primitives_written;
1119          uint64_t primitive_storage_needed;
1120 
1121          /* SAMPLE_STREAMOUTSTATS stores this structure:
1122           * {
1123           *	u64 NumPrimitivesWritten;
1124           *	u64 PrimitiveStorageNeeded;
1125           * }
1126           */
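         /* The slot holds this structure twice (sampled at query begin and
          * end), so src64[0..1] are the begin values and src64[2..3] the end
          * values.
          */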
1127          available = 1;
1128          for (int j = 0; j < 4; j++) {
1129             if (!(p_atomic_read(src64 + j) & 0x8000000000000000UL))
1130                available = 0;
1131          }
1132 
1133          if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
1134             result = VK_NOT_READY;
1135 
1136          num_primitives_written = src64[3] - src64[1];
1137          primitive_storage_needed = src64[2] - src64[0];
1138 
1139          if (flags & VK_QUERY_RESULT_64_BIT) {
1140             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1141                *(uint64_t *)dest = num_primitives_written;
1142             dest += 8;
1143             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1144                *(uint64_t *)dest = primitive_storage_needed;
1145             dest += 8;
1146          } else {
1147             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1148                *(uint32_t *)dest = num_primitives_written;
1149             dest += 4;
1150             if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
1151                *(uint32_t *)dest = primitive_storage_needed;
1152             dest += 4;
1153          }
1154          break;
1155       }
1156       default:
1157          unreachable("trying to get results of unhandled query type");
1158       }
1159 
1160       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1161          if (flags & VK_QUERY_RESULT_64_BIT) {
1162             *(uint64_t *)dest = available;
1163          } else {
1164             *(uint32_t *)dest = available;
1165          }
1166       }
1167    }
1168 
1169    return result;
1170 }
1171 
1172 static void
1173 emit_query_flush(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool)
1174 {
1175    if (cmd_buffer->pending_reset_query) {
1176       if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
1177          /* Only need to flush caches if the query pool size is
1178           * large enough to be reset using the compute shader
1179           * path. Small pools don't need any cache flushes
1180           * because we use a CP DMA clear.
1181           */
1182          si_emit_cache_flush(cmd_buffer);
1183       }
1184    }
1185 }
1186 
1187 static size_t
1188 radv_query_result_size(const struct radv_query_pool *pool, VkQueryResultFlags flags)
1189 {
1190    unsigned values = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0;
1191    switch (pool->type) {
1192    case VK_QUERY_TYPE_TIMESTAMP:
1193    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1194    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1195    case VK_QUERY_TYPE_OCCLUSION:
1196       values += 1;
1197       break;
1198    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1199       values += util_bitcount(pool->pipeline_stats_mask);
1200       break;
1201    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1202       values += 2;
1203       break;
1204    default:
1205       unreachable("trying to get size of unhandled query type");
1206    }
1207    return values * ((flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4);
1208 }
1209 
1210 void
1211 radv_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
1212                              uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
1213                              VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
1214 {
1215    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1216    RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1217    RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
1218    struct radeon_cmdbuf *cs = cmd_buffer->cs;
1219    uint64_t va = radv_buffer_get_va(pool->bo);
1220    uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
1221    size_t dst_size = radv_query_result_size(pool, flags);
1222    dest_va += dst_buffer->offset + dstOffset;
1223 
1224    if (!queryCount)
1225       return;
1226 
1227    radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
1228    radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
1229 
1230    /* From the Vulkan spec 1.1.108:
1231     *
1232     * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1233     *  previous uses of vkCmdResetQueryPool in the same queue, without any
1234     *  additional synchronization."
1235     *
1236     * So, we have to flush the caches if the compute shader path was used.
1237     */
1238    emit_query_flush(cmd_buffer, pool);
1239 
1240    switch (pool->type) {
1241    case VK_QUERY_TYPE_OCCLUSION:
1242       if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1243          unsigned enabled_rb_mask = cmd_buffer->device->physical_device->rad_info.enabled_rb_mask;
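         /* Upper dword of the 'end' counter of the highest enabled render
          * backend; the wait below polls it as the query's availability
          * indicator.
          */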
1244          uint32_t rb_avail_offset = 16 * util_last_bit(enabled_rb_mask) - 4;
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;
            uint64_t src_va = va + query * pool->stride + rb_avail_offset;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            /* Waits on the upper word of the last DB entry */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, src_va, 0x80000000, 0xffffffff);
         }
      }
      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, dst_size, queryCount,
                        flags, 0, 0);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            uint64_t avail_va = va + pool->availability_offset + 4 * query;

            /* This waits on the ME. All copies below are done on the ME */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, avail_va, 1, 0xffffffff);
         }
      }
      radv_query_shader(
         cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
         pool->bo, dst_buffer->bo, firstQuery * pool->stride, dst_buffer->offset + dstOffset,
         pool->stride, stride, dst_size, queryCount, flags, pool->pipeline_stats_mask,
         pool->availability_offset + 4 * firstQuery);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
   case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
   case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;
            uint64_t local_src_va = va + query * pool->stride;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            /* Wait on the high 32 bits of the timestamp in
             * case the low part is 0xffffffff.
             */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL, local_src_va + 4,
                             TIMESTAMP_NOT_READY >> 32, 0xffffffff);
         }
      }

      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.timestamp_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, dst_size, queryCount,
                        flags, 0, 0);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; i++) {
            unsigned query = firstQuery + i;
            uint64_t src_va = va + query * pool->stride;

            radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

            /* Wait on the upper word of all results. */
            for (unsigned j = 0; j < 4; j++, src_va += 8) {
               radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, src_va + 4, 0x80000000,
                                0xffffffff);
            }
         }
      }

      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, dst_size, queryCount,
                        flags, 0, 0);
      break;
   default:
      unreachable("trying to get results of unhandled query type");
   }
}

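/* Value used to clear query slots on reset. Timestamp-style queries (and the
 * acceleration structure size queries, which reuse the same layout) are
 * cleared to all ones (TIMESTAMP_NOT_READY) so that waiters can tell that no
 * result has been written yet; all other query types start at zero.
 */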
static uint32_t
query_clear_value(VkQueryType type)
{
   switch (type) {
   case VK_QUERY_TYPE_TIMESTAMP:
   case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
   case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
      return (uint32_t)TIMESTAMP_NOT_READY;
   default:
      return 0;
   }
}

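/* GPU-side reset (vkCmdResetQueryPool): the query range is cleared with the
 * fill-buffer meta path, and for pipeline statistics the per-query
 * availability dwords are cleared as well.
 */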
void
radv_CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                       uint32_t queryCount)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   uint32_t value = query_clear_value(pool->type);
   uint32_t flush_bits = 0;

   /* Make sure to sync all previous work if the given command buffer has
    * pending active queries. Otherwise the GPU might write query data
    * after the reset operation.
    */
   cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

   flush_bits |= radv_fill_buffer(cmd_buffer, NULL, pool->bo, firstQuery * pool->stride,
                                  queryCount * pool->stride, value);

   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
      flush_bits |= radv_fill_buffer(cmd_buffer, NULL, pool->bo,
                                     pool->availability_offset + firstQuery * 4, queryCount * 4, 0);
   }

   if (flush_bits) {
      /* Only need to flush caches for the compute shader path. */
      cmd_buffer->pending_reset_query = true;
      cmd_buffer->state.flush_bits |= flush_bits;
   }
}

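/* Host-side reset (vkResetQueryPool): the pool is persistently mapped, so the
 * query slots and availability dwords can be cleared directly through the CPU
 * pointer without recording any GPU work.
 */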
void
radv_ResetQueryPool(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery,
                    uint32_t queryCount)
{
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);

   uint32_t value = query_clear_value(pool->type);
   uint32_t *data = (uint32_t *)(pool->ptr + firstQuery * pool->stride);
   uint32_t *data_end = (uint32_t *)(pool->ptr + (firstQuery + queryCount) * pool->stride);

   for (uint32_t *p = data; p != data_end; ++p)
      *p = value;

   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
      memset(pool->ptr + pool->availability_offset + firstQuery * 4, 0, queryCount * 4);
   }
}

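/* Map a transform feedback stream index to the matching SAMPLE_STREAMOUTSTATS
 * event; stream 0 is also the fallback for out-of-range values.
 */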
static unsigned
event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0:
      return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1:
      return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2:
      return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3:
      return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}

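/* Emit the packets that snapshot the "begin" value of a query at va:
 * ZPASS_DONE for occlusion queries, SAMPLE_PIPELINESTAT for pipeline
 * statistics and SAMPLE_STREAMOUTSTATS* for transform feedback queries.
 * Occlusion and pipeline-statistics queries also update the per-command-buffer
 * active counters so the relevant hardware counting state can be enabled.
 */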
static void
emit_begin_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool, uint64_t va,
                 VkQueryType query_type, VkQueryControlFlags flags, uint32_t index)
{
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   switch (query_type) {
   case VK_QUERY_TYPE_OCCLUSION:
      radeon_check_space(cmd_buffer->device->ws, cs, 7);

      ++cmd_buffer->state.active_occlusion_queries;
      if (cmd_buffer->state.active_occlusion_queries == 1) {
         if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
            /* This is the first occlusion query, enable
             * the hint if the precision bit is set.
             */
            cmd_buffer->state.perfect_occlusion_queries_enabled = true;
         }

         radv_set_db_count_control(cmd_buffer);
      } else {
         if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
             !cmd_buffer->state.perfect_occlusion_queries_enabled) {
            /* This is not the first query, but this one
             * needs precise results, so DB_COUNT_CONTROL
             * has to be updated accordingly.
             */
            cmd_buffer->state.perfect_occlusion_queries_enabled = true;

            radv_set_db_count_control(cmd_buffer);
         }
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      ++cmd_buffer->state.active_pipeline_queries;
      if (cmd_buffer->state.active_pipeline_queries == 1) {
         cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

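      /* Not every statistic comes from the fixed-function counters: when
       * radv_query_pool_needs_gds() is true the GS primitive count lives in a
       * GDS counter instead, so copy its current value into the
       * GEOMETRY_SHADER_PRIMITIVES slot of the begin snapshot.
       */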
      if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
         int idx = radv_get_pipeline_statistics_index(
            VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

         /* Make sure GDS is idle before copying the value. */
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2;
         si_emit_cache_flush(cmd_buffer);

         va += 8 * idx;

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_GDS) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_WR_CONFIRM);
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);

         /* Record that the command buffer needs GDS. */
         cmd_buffer->gds_needed = true;

         cmd_buffer->state.active_pipeline_gds_queries++;
      }
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      assert(index < MAX_SO_STREAMS);

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      unreachable("beginning unhandled query type");
   }
}

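/* Emit the packets that snapshot the "end" value of a query. For occlusion
 * queries the end ZPASS_DONE results land at va + 8; for pipeline statistics
 * the end snapshot is written after the begin block and a bottom-of-pipe EOP
 * event marks the availability dword once all counters have landed; for
 * transform feedback queries the end pair starts at va + 16.
 */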
static void
emit_end_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool, uint64_t va,
               uint64_t avail_va, VkQueryType query_type, uint32_t index)
{
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   switch (query_type) {
   case VK_QUERY_TYPE_OCCLUSION:
      radeon_check_space(cmd_buffer->device->ws, cs, 14);

      cmd_buffer->state.active_occlusion_queries--;
      if (cmd_buffer->state.active_occlusion_queries == 0) {
         radv_set_db_count_control(cmd_buffer);

         /* Reset the perfect occlusion queries hint now that no
          * queries are active.
          */
         cmd_buffer->state.perfect_occlusion_queries_enabled = false;
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va + 8);
      radeon_emit(cs, (va + 8) >> 32);

      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      radeon_check_space(cmd_buffer->device->ws, cs, 16);

      cmd_buffer->state.active_pipeline_queries--;
      if (cmd_buffer->state.active_pipeline_queries == 0) {
         cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
      }
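      /* The end snapshot is written right after the begin block
       * (pipelinestat_block_size = 11 counters * 8 bytes).
       */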
      va += pipelinestat_block_size;

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      si_cs_emit_write_event_eop(cs, cmd_buffer->device->physical_device->rad_info.chip_class,
                                 radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS,
                                 0, EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, avail_va, 1,
                                 cmd_buffer->gfx9_eop_bug_va);

      if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
         int idx = radv_get_pipeline_statistics_index(
            VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

         /* Make sure GDS is idle before copying the value. */
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2;
         si_emit_cache_flush(cmd_buffer);

         va += 8 * idx;

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_GDS) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_WR_CONFIRM);
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);

         cmd_buffer->state.active_pipeline_gds_queries--;
      }
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      assert(index < MAX_SO_STREAMS);

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
      radeon_emit(cs, (va + 16));
      radeon_emit(cs, (va + 16) >> 32);
      break;
   default:
      unreachable("ending unhandled query type");
   }

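   /* Remember the flushes that are required to make the query results visible
    * in memory; they are applied later (e.g. on vkCmdResetQueryPool) instead
    * of stalling here.
    */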
   cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2 |
                                          RADV_CMD_FLAG_INV_VCACHE;
   if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
      cmd_buffer->active_query_flush_bits |=
         RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB;
   }
}

void
radv_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                             VkQueryControlFlags flags, uint32_t index)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t va = radv_buffer_get_va(pool->bo);

   radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

   emit_query_flush(cmd_buffer, pool);

   va += pool->stride * query;

   emit_begin_query(cmd_buffer, pool, va, pool->type, flags, index);
}

void
radv_CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                   VkQueryControlFlags flags)
{
   radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

void
radv_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                           uint32_t index)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   uint64_t va = radv_buffer_get_va(pool->bo);
   uint64_t avail_va = va + pool->availability_offset + 4 * query;
   va += pool->stride * query;

   /* We do not need to add the pool BO to the list because the query must
    * currently be active, which means the BO is already in the list.
    */
   emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, index);

   /*
    * For multiview we have to emit a query for each bit in the mask;
    * however, the first query we emit gets the totals for all the
    * operations, so we don't want real values in the other queries.
    * Emit a fake begin/end sequence for each extra view so the waiting
    * code sees a completed query and doesn't hang, while those queries
    * simply return 0.
    */
   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
      for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
         va += pool->stride;
         avail_va += 4;
         emit_begin_query(cmd_buffer, pool, va, pool->type, 0, 0);
         emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, 0);
      }
   }
}

void
radv_CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query)
{
   radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}

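/* Write a timestamp into the query slot. TOP_OF_PIPE requests are satisfied
 * with a CP COPY_DATA of the current GPU timestamp; every other stage is
 * implemented as a bottom-of-pipe EOP timestamp. With multiview enabled, one
 * timestamp is written per bit set in the view mask.
 */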
void
radv_CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                       VkQueryPool queryPool, uint32_t query)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t va = radv_buffer_get_va(pool->bo);
   uint64_t query_va = va + pool->stride * query;

   radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

   emit_query_flush(cmd_buffer, pool);

   int num_queries = 1;
   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
      num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

   ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

   for (unsigned i = 0; i < num_queries; i++) {
      switch (pipelineStage) {
      case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
                            COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) | COPY_DATA_DST_SEL(V_370_MEM));
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, query_va);
         radeon_emit(cs, query_va >> 32);
         break;
      default:
         si_cs_emit_write_event_eop(cs, cmd_buffer->device->physical_device->rad_info.chip_class,
                                    mec, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM,
                                    EOP_DATA_SEL_TIMESTAMP, query_va, 0,
                                    cmd_buffer->gfx9_eop_bug_va);
         break;
      }
      query_va += pool->stride;
   }

   cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2 |
                                          RADV_CMD_FLAG_INV_VCACHE;
   if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
      cmd_buffer->active_query_flush_bits |=
         RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB;
   }

   assert(cmd_buffer->cs->cdw <= cdw_max);
}

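/* The compacted and serialization sizes are stored in the acceleration
 * structure header, so this query simply copies the matching 64-bit field
 * from each structure into its query slot with a CP COPY_DATA packet.
 */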
void
radv_CmdWriteAccelerationStructuresPropertiesKHR(
   VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount,
   const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType,
   VkQueryPool queryPool, uint32_t firstQuery)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t pool_va = radv_buffer_get_va(pool->bo);
   uint64_t query_va = pool_va + pool->stride * firstQuery;

   radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

   emit_query_flush(cmd_buffer, pool);

   ASSERTED unsigned cdw_max =
      radeon_check_space(cmd_buffer->device->ws, cs, 6 * accelerationStructureCount);

   for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
      RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct, pAccelerationStructures[i]);
      uint64_t va = radv_accel_struct_get_va(accel_struct);

      switch (queryType) {
      case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
         va += offsetof(struct radv_accel_struct_header, compacted_size);
         break;
      case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
         va += offsetof(struct radv_accel_struct_header, serialization_size);
         break;
      default:
         unreachable("Unhandled acceleration structure query type.");
      }

      radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
      radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                         COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      radeon_emit(cs, query_va);
      radeon_emit(cs, query_va >> 32);

      query_va += pool->stride;
   }

   assert(cmd_buffer->cs->cdw <= cdw_max);
}