#include "zink_batch.h"

#include "zink_context.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_query.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_surface.h"

#include "util/hash_table.h"
#include "util/u_debug.h"
#include "util/set.h"

#ifdef VK_USE_PLATFORM_METAL_EXT
#include "QuartzCore/CAMetalLayer.h"
#endif
#include "wsi_common.h"

void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr)
{
   sprintf(buf, "zink_batch_state");
}

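/* restore a batch state to a reusable condition: reset its command pool, queue its
 * resource references for release, drop refs on all other per-batch objects
 * (queries, surfaces, buffer views, framebuffers, samplers, programs), and clear
 * the fence/usage bookkeeping
 */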
void
zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   if (VKSCR(ResetCommandPool)(screen->dev, bs->cmdpool, 0) != VK_SUCCESS)
      debug_printf("vkResetCommandPool failed\n");

   /* unref all used resources */
   set_foreach_remove(bs->resources, entry) {
      struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
      if (!zink_resource_object_usage_unset(obj, bs)) {
         obj->unordered_barrier = false;
         obj->access = 0;
         obj->access_stage = 0;
      }
      util_dynarray_append(&bs->unref_resources, struct zink_resource_object*, obj);
   }

   for (unsigned i = 0; i < 2; i++) {
      while (util_dynarray_contains(&bs->bindless_releases[i], uint32_t)) {
         uint32_t handle = util_dynarray_pop(&bs->bindless_releases[i], uint32_t);
         bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
         struct util_idalloc *ids = i ? &ctx->di.bindless[is_buffer].img_slots : &ctx->di.bindless[is_buffer].tex_slots;
         util_idalloc_free(ids, is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle);
      }
   }

   set_foreach_remove(bs->active_queries, entry) {
      struct zink_query *query = (void*)entry->key;
      zink_prune_query(screen, bs, query);
   }

   set_foreach_remove(bs->surfaces, entry) {
      struct zink_surface *surf = (struct zink_surface *)entry->key;
      zink_batch_usage_unset(&surf->batch_uses, bs);
      zink_surface_reference(screen, &surf, NULL);
   }
   set_foreach_remove(bs->bufferviews, entry) {
      struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
      zink_batch_usage_unset(&buffer_view->batch_uses, bs);
      zink_buffer_view_reference(screen, &buffer_view, NULL);
   }

   util_dynarray_foreach(&bs->dead_framebuffers, struct zink_framebuffer*, fb) {
      zink_framebuffer_reference(screen, fb, NULL);
   }
   util_dynarray_clear(&bs->dead_framebuffers);
   util_dynarray_foreach(&bs->zombie_samplers, VkSampler, samp) {
      VKSCR(DestroySampler)(screen->dev, *samp, NULL);
   }
   util_dynarray_clear(&bs->zombie_samplers);
   util_dynarray_clear(&bs->persistent_resources);

   screen->batch_descriptor_reset(screen, bs);

   set_foreach_remove(bs->programs, entry) {
      struct zink_program *pg = (struct zink_program*)entry->key;
      zink_batch_usage_unset(&pg->batch_uses, bs);
      if (pg->is_compute) {
         struct zink_compute_program *comp = (struct zink_compute_program*)pg;
         zink_compute_program_reference(screen, &comp, NULL);
      } else {
         struct zink_gfx_program *prog = (struct zink_gfx_program*)pg;
         zink_gfx_program_reference(screen, &prog, NULL);
      }
   }

   pipe_resource_reference(&bs->flush_res, NULL);

   bs->resource_size = 0;

   /* only reset submitted here so that tc fence desync can pick up the 'completed' flag
    * before the state is reused
    */
   bs->fence.submitted = false;
   bs->has_barriers = false;
   bs->scanout_flush = false;
   if (bs->fence.batch_id)
      zink_screen_update_last_finished(screen, bs->fence.batch_id);
   bs->submit_count++;
   bs->fence.batch_id = 0;
   bs->usage.usage = 0;
   bs->next = NULL;
}

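/* release the resource object refs that zink_reset_batch_state() queued up */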
static void
unref_resources(struct zink_screen *screen, struct zink_batch_state *bs)
{
   while (util_dynarray_contains(&bs->unref_resources, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->unref_resources, struct zink_resource_object*);
      zink_resource_object_reference(screen, &obj, NULL);
   }
}

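/* force the state's fence to read as completed, then reset it and immediately
 * release its queued resource refs
 */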
void
zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   bs->fence.completed = true;
   zink_reset_batch_state(ctx, bs);
   unref_resources(zink_screen(ctx->base.screen), bs);
}

static void
pop_batch_state(struct zink_context *ctx)
{
   const struct zink_batch_state *bs = ctx->batch_states;
   ctx->batch_states = bs->next;
   ctx->batch_states_count--;
   if (ctx->last_fence == &bs->fence)
      ctx->last_fence = NULL;
}

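/* drain ctx->batch_states: force each fence to 'completed', reset the state, and
 * move it to the free_batch_states pool
 */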
void
zink_batch_reset_all(struct zink_context *ctx)
{
   simple_mtx_lock(&ctx->batch_mtx);
   while (ctx->batch_states) {
      struct zink_batch_state *bs = ctx->batch_states;
      bs->fence.completed = true;
      pop_batch_state(ctx);
      zink_reset_batch_state(ctx, bs);
      util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
   }
   simple_mtx_unlock(&ctx->batch_mtx);
}

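/* destroy all Vulkan objects owned by a batch state (fence, command buffers,
 * command pool), free its containers and descriptor data, and free the state itself
 */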
void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs)
      return;

   util_queue_fence_destroy(&bs->flush_completed);

   cnd_destroy(&bs->usage.flush);
   mtx_destroy(&bs->usage.mtx);

   if (bs->fence.fence)
      VKSCR(DestroyFence)(screen->dev, bs->fence.fence, NULL);

   if (bs->cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->cmdbuf);
   if (bs->barrier_cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->barrier_cmdbuf);
   if (bs->cmdpool)
      VKSCR(DestroyCommandPool)(screen->dev, bs->cmdpool, NULL);

   util_dynarray_fini(&bs->zombie_samplers);
   util_dynarray_fini(&bs->dead_framebuffers);
   util_dynarray_fini(&bs->unref_resources);
   util_dynarray_fini(&bs->bindless_releases[0]);
   util_dynarray_fini(&bs->bindless_releases[1]);
   _mesa_set_destroy(bs->surfaces, NULL);
   _mesa_set_destroy(bs->bufferviews, NULL);
   _mesa_set_destroy(bs->programs, NULL);
   _mesa_set_destroy(bs->active_queries, NULL);
   screen->batch_descriptor_deinit(screen, bs);
   ralloc_free(bs);
}

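/* allocate and initialize a new batch state: command pool, primary and barrier
 * command buffers, tracking sets and dynarrays, descriptor data, and a VkFence;
 * returns NULL (after cleanup) on any failure
 */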
static struct zink_batch_state *
create_batch_state(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
   bs->have_timelines = ctx->have_timelines;
   VkCommandPoolCreateInfo cpci = {0};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   if (VKSCR(CreateCommandPool)(screen->dev, &cpci, NULL, &bs->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {0};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = bs->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   if (VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, &bs->cmdbuf) != VK_SUCCESS)
      goto fail;

   if (VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, &bs->barrier_cmdbuf) != VK_SUCCESS)
      goto fail;

#define SET_CREATE_OR_FAIL(ptr) \
   ptr = _mesa_pointer_set_create(bs); \
   if (!ptr) \
      goto fail

   bs->ctx = ctx;

   SET_CREATE_OR_FAIL(bs->resources);
   SET_CREATE_OR_FAIL(bs->surfaces);
   SET_CREATE_OR_FAIL(bs->bufferviews);
   SET_CREATE_OR_FAIL(bs->programs);
   SET_CREATE_OR_FAIL(bs->active_queries);
   util_dynarray_init(&bs->zombie_samplers, NULL);
   util_dynarray_init(&bs->dead_framebuffers, NULL);
   util_dynarray_init(&bs->persistent_resources, NULL);
   util_dynarray_init(&bs->unref_resources, NULL);
   util_dynarray_init(&bs->bindless_releases[0], NULL);
   util_dynarray_init(&bs->bindless_releases[1], NULL);

   cnd_init(&bs->usage.flush);
   mtx_init(&bs->usage.mtx, mtx_plain);

   if (!screen->batch_descriptor_init(screen, bs))
      goto fail;

   VkFenceCreateInfo fci = {0};
   fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

   if (VKSCR(CreateFence)(screen->dev, &fci, NULL, &bs->fence.fence) != VK_SUCCESS)
      goto fail;

   util_queue_fence_init(&bs->flush_completed);

   return bs;
fail:
   zink_batch_state_destroy(screen, bs);
   return NULL;
}

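/* a state is reusable once its fence has been both submitted and completed */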
static inline bool
find_unused_state(struct zink_batch_state *bs)
{
   struct zink_fence *fence = &bs->fence;
   /* we can't reset these from fence_finish because threads */
   bool completed = p_atomic_read(&fence->completed);
   bool submitted = p_atomic_read(&fence->submitted);
   return submitted && completed;
}

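/* pick a batch state for the next recording: prefer a pooled free state, then the
 * oldest submitted state if its work has finished, and otherwise allocate a new one
 * (pre-allocating a few extras on first use)
 */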
static struct zink_batch_state *
get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = NULL;

   simple_mtx_lock(&ctx->batch_mtx);
   if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
      bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
   if (!bs && ctx->batch_states) {
      /* states are stored sequentially, so if the first one doesn't work, none of them will */
      if (zink_screen_check_last_finished(screen, ctx->batch_states->fence.batch_id) ||
          find_unused_state(ctx->batch_states)) {
         bs = ctx->batch_states;
         pop_batch_state(ctx);
      }
   }
   simple_mtx_unlock(&ctx->batch_mtx);
   if (bs) {
      if (bs->fence.submitted && !bs->fence.completed)
         /* this fence is already done, so we need vulkan to release the cmdbuf */
         zink_vkfence_wait(screen, &bs->fence, PIPE_TIMEOUT_INFINITE);
      zink_reset_batch_state(ctx, bs);
   } else {
      if (!batch->state) {
         /* this is batch init, so create a few more states for later use */
         for (int i = 0; i < 3; i++) {
            struct zink_batch_state *state = create_batch_state(ctx);
            util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
         }
      }
      bs = create_batch_state(ctx);
   }
   return bs;
}

void
zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   batch->state = get_batch_state(ctx, batch);
   assert(batch->state);

   batch->has_work = false;
}

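/* begin recording a new batch: grab a state, begin both command buffers, and resume
 * active queries unless queries are disabled
 */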
void
zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   zink_reset_batch(ctx, batch);

   batch->state->usage.unflushed = true;

   VkCommandBufferBeginInfo cbbi = {0};
   cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
   cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
   if (VKCTX(BeginCommandBuffer)(batch->state->cmdbuf, &cbbi) != VK_SUCCESS)
      debug_printf("vkBeginCommandBuffer failed\n");
   if (VKCTX(BeginCommandBuffer)(batch->state->barrier_cmdbuf, &cbbi) != VK_SUCCESS)
      debug_printf("vkBeginCommandBuffer failed\n");

   batch->state->fence.completed = false;
   if (ctx->last_fence) {
      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
      batch->last_batch_usage = &last_state->usage;
   }

   if (!ctx->queries_disabled)
      zink_resume_queries(ctx, batch);
}

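/* runs after submit_queue (on the flush thread when threaded): propagate device loss
 * to the screen and throttle if too many batch states have accumulated
 */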
static void
post_submit(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_screen *screen = zink_screen(bs->ctx->base.screen);

   if (bs->is_device_lost) {
      if (bs->ctx->reset.reset)
         bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
      screen->device_lost = true;
   } else if (bs->ctx->batch_states_count > 5000) {
      zink_screen_batch_id_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
   }
}

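/* assemble and submit the batch's command buffers: assign a batch id, end the
 * command buffers, flush non-coherent persistently mapped memory, and call
 * vkQueueSubmit (signaling the timeline semaphore when available)
 */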
static void
submit_queue(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_context *ctx = bs->ctx;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkSubmitInfo si = {0};

   while (!bs->fence.batch_id)
      bs->fence.batch_id = p_atomic_inc_return(&screen->curr_batch);
   bs->usage.usage = bs->fence.batch_id;
   bs->usage.unflushed = false;

   if (ctx->have_timelines && screen->last_finished > bs->fence.batch_id && bs->fence.batch_id == 1) {
      if (!zink_screen_init_semaphore(screen)) {
         debug_printf("timeline init failed, things are about to go dramatically wrong.");
         ctx->have_timelines = false;
      }
   }

   VKSCR(ResetFences)(screen->dev, 1, &bs->fence.fence);

   uint64_t batch_id = bs->fence.batch_id;
   si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   si.waitSemaphoreCount = 0;
   si.pWaitSemaphores = NULL;
   si.signalSemaphoreCount = 0;
   si.pSignalSemaphores = NULL;
   si.pWaitDstStageMask = NULL;
   si.commandBufferCount = bs->has_barriers ? 2 : 1;
   VkCommandBuffer cmdbufs[2] = {
      bs->barrier_cmdbuf,
      bs->cmdbuf,
   };
   si.pCommandBuffers = bs->has_barriers ? cmdbufs : &cmdbufs[1];

   VkTimelineSemaphoreSubmitInfo tsi = {0};
   if (bs->have_timelines) {
      tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
      si.pNext = &tsi;
      tsi.signalSemaphoreValueCount = 1;
      tsi.pSignalSemaphoreValues = &batch_id;
      si.signalSemaphoreCount = 1;
      si.pSignalSemaphores = &screen->sem;
   }

   struct wsi_memory_signal_submit_info mem_signal = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
      .pNext = si.pNext,
   };

   if (bs->flush_res && screen->needs_mesa_flush_wsi) {
      struct zink_resource *flush_res = zink_resource(bs->flush_res);
      mem_signal.memory = zink_bo_get_mem(flush_res->scanout_obj ? flush_res->scanout_obj->bo : flush_res->obj->bo);
      si.pNext = &mem_signal;
   }

   if (VKSCR(EndCommandBuffer)(bs->cmdbuf) != VK_SUCCESS) {
      debug_printf("vkEndCommandBuffer failed\n");
      bs->is_device_lost = true;
      goto end;
   }
   if (VKSCR(EndCommandBuffer)(bs->barrier_cmdbuf) != VK_SUCCESS) {
      debug_printf("vkEndCommandBuffer failed\n");
      bs->is_device_lost = true;
      goto end;
   }

   while (util_dynarray_contains(&bs->persistent_resources, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->persistent_resources, struct zink_resource_object*);
      VkMappedMemoryRange range = zink_resource_init_mem_range(screen, obj, 0, obj->size);
      VKSCR(FlushMappedMemoryRanges)(screen->dev, 1, &range);
   }

   simple_mtx_lock(&screen->queue_lock);
   if (VKSCR(QueueSubmit)(bs->queue, 1, &si, bs->fence.fence) != VK_SUCCESS) {
      debug_printf("ZINK: vkQueueSubmit() failed\n");
      bs->is_device_lost = true;
   }
   simple_mtx_unlock(&screen->queue_lock);
   bs->submit_count++;
end:
   cnd_broadcast(&bs->usage.flush);

   p_atomic_set(&bs->fence.submitted, true);
   unref_resources(screen, bs);
}


/* TODO: remove for wsi */
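/* copy the resource's contents into its separate scanout image, with the barriers
 * needed to transition the scanout image to PRESENT_SRC for presentation
 */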
static void
copy_scanout(struct zink_batch_state *bs, struct zink_resource *res)
{
   if (!bs->scanout_flush)
      return;
   struct zink_context *ctx = bs->ctx;

   VkImageCopy region = {0};
   struct pipe_box box = {0, 0, 0,
                          u_minify(res->base.b.width0, 0),
                          u_minify(res->base.b.height0, 0), res->base.b.array_size};
   box.depth = util_num_layers(&res->base.b, 0);
   struct pipe_box *src_box = &box;
   unsigned dstz = 0;

   region.srcSubresource.aspectMask = res->aspect;
   region.srcSubresource.mipLevel = 0;
   switch (res->base.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      /* these use layer */
      region.srcSubresource.baseArrayLayer = src_box->z;
      region.srcSubresource.layerCount = src_box->depth;
      region.srcOffset.z = 0;
      region.extent.depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      /* this uses depth */
      region.srcSubresource.baseArrayLayer = 0;
      region.srcSubresource.layerCount = 1;
      region.srcOffset.z = src_box->z;
      region.extent.depth = src_box->depth;
      break;
   default:
      /* these must only copy one layer */
      region.srcSubresource.baseArrayLayer = 0;
      region.srcSubresource.layerCount = 1;
      region.srcOffset.z = 0;
      region.extent.depth = 1;
   }

   region.srcOffset.x = src_box->x;
   region.srcOffset.y = src_box->y;

   region.dstSubresource.aspectMask = res->aspect;
   region.dstSubresource.mipLevel = 0;
   switch (res->base.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      /* these use layer */
      region.dstSubresource.baseArrayLayer = dstz;
      region.dstSubresource.layerCount = src_box->depth;
      region.dstOffset.z = 0;
      break;
   case PIPE_TEXTURE_3D:
      /* this uses depth */
      region.dstSubresource.baseArrayLayer = 0;
      region.dstSubresource.layerCount = 1;
      region.dstOffset.z = dstz;
      break;
   default:
      /* these must only copy one layer */
      region.dstSubresource.baseArrayLayer = 0;
      region.dstSubresource.layerCount = 1;
      region.dstOffset.z = 0;
   }

   region.dstOffset.x = 0;
   region.dstOffset.y = 0;
   region.extent.width = src_box->width;
   region.extent.height = src_box->height;

   VkImageMemoryBarrier imb1;
   zink_resource_image_barrier_init(&imb1, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
   VKCTX(CmdPipelineBarrier)(
      bs->cmdbuf,
      res->obj->access_stage ? res->obj->access_stage : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb1
   );

   VkImageSubresourceRange isr = {
      res->aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };
   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      0,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      res->scanout_obj_init ? VK_IMAGE_LAYOUT_PRESENT_SRC_KHR : VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->scanout_obj->image,
      isr
   };
   VKCTX(CmdPipelineBarrier)(
      bs->cmdbuf,
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   VKCTX(CmdCopyImage)(bs->cmdbuf, res->obj->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                       res->scanout_obj->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                       1, &region);
   imb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
   imb.dstAccessMask = 0;
   imb.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
   imb.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
   VKCTX(CmdPipelineBarrier)(
      bs->cmdbuf,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );
   /* separate flag to avoid annoying validation errors for new scanout objs */
   res->scanout_obj_init = true;
}

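/* finish the current batch: copy to the scanout image if needed, suspend queries,
 * recycle any completed batch states, link this state into the pending list, and
 * submit it (asynchronously when the screen is threaded)
 */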
void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   if (batch->state->flush_res)
      copy_scanout(batch->state, zink_resource(batch->state->flush_res));
   if (!ctx->queries_disabled)
      zink_suspend_queries(ctx, batch);

   tc_driver_internal_flush_notify(ctx->tc);

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs;

   simple_mtx_lock(&ctx->batch_mtx);
   if (ctx->oom_flush || ctx->batch_states_count > 10) {
      assert(!ctx->batch_states_count || ctx->batch_states);
      while (ctx->batch_states) {
         bs = ctx->batch_states;
         struct zink_fence *fence = &bs->fence;
         /* once an incomplete state is reached, no more will be complete */
         if (!zink_check_batch_completion(ctx, fence->batch_id, true))
            break;

         if (bs->fence.submitted && !bs->fence.completed)
            /* this fence is already done, so we need vulkan to release the cmdbuf */
            zink_vkfence_wait(screen, &bs->fence, PIPE_TIMEOUT_INFINITE);
         pop_batch_state(ctx);
         zink_reset_batch_state(ctx, bs);
         util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
      }
      if (ctx->batch_states_count > 50)
         ctx->oom_flush = true;
   }

   bs = batch->state;
   if (ctx->last_fence)
      zink_batch_state(ctx->last_fence)->next = bs;
   else {
      assert(!ctx->batch_states);
      ctx->batch_states = bs;
   }
   ctx->last_fence = &bs->fence;
   ctx->batch_states_count++;
   simple_mtx_unlock(&ctx->batch_mtx);
   batch->work_count = 0;

   if (screen->device_lost)
      return;

   if (screen->threaded) {
      bs->queue = screen->thread_queue;
      util_queue_add_job(&screen->flush_queue, bs, &bs->flush_completed,
                         submit_queue, post_submit, 0);
   } else {
      bs->queue = screen->queue;
      submit_queue(bs, NULL, 0);
      post_submit(bs, NULL, 0);
   }
}

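/* mark a resource as read or written by the current batch; writes to scanout
 * resources flag the batch for a scanout copy, and non-coherent persistently
 * mapped resources are queued for an explicit flush at submit time
 */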
void
zink_batch_resource_usage_set(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   zink_resource_usage_set(res, batch->state, write);
   if (write && res->scanout_obj)
      batch->state->scanout_flush = true;
   /* multiple array entries are fine */
   if (!res->obj->coherent && res->obj->persistent_maps)
      util_dynarray_append(&batch->state->persistent_resources, struct zink_resource_object*, res->obj);

   batch->has_work = true;
}

void
zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   /* if the resource doesn't yet have usage of any sort set for this batch,
    * or if it isn't bound anywhere,
    * then it doesn't yet hold a ref for this batch and needs one here
    */
   if (!zink_resource_usage_matches(res, batch->state) ||
       !zink_resource_has_binds(res))
      zink_batch_reference_resource(batch, res);
   zink_batch_resource_usage_set(batch, res, write);
}

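/* add ptr to the batch-tracking set; returns true only if it was newly added */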
bool
batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr)
{
   bool found = false;
   _mesa_set_search_or_add(s, ptr, &found);
   return !found;
}

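/* flag an oom flush/stall once the batch's referenced resource memory reaches the
 * screen's clamp_video_mem limit
 */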
ALWAYS_INLINE static void
check_oom_flush(struct zink_context *ctx, const struct zink_batch *batch)
{
   const VkDeviceSize resource_size = batch->state->resource_size;
   if (resource_size >= zink_screen(ctx->base.screen)->clamp_video_mem) {
      ctx->oom_flush = true;
      ctx->oom_stall = true;
   }
}

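/* add a batch ref to a resource object and account its size toward the oom limit;
 * the _move variant below does the same bookkeeping without taking a new reference
 */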
void
zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *res)
{
   if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
      return;
   pipe_reference(NULL, &res->obj->reference);
   batch->state->resource_size += res->obj->size;
   check_oom_flush(batch->state->ctx, batch);
   batch->has_work = true;
}

void
zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res)
{
   if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
      return;
   batch->state->resource_size += res->obj->size;
   check_oom_flush(batch->state->ctx, batch);
   batch->has_work = true;
}

void
zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view)
{
   if (!batch_ptr_add_usage(batch, batch->state->bufferviews, buffer_view))
      return;
   pipe_reference(NULL, &buffer_view->reference);
   batch->has_work = true;
}

void
zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface)
{
   if (!batch_ptr_add_usage(batch, batch->state->surfaces, surface))
      return;
   struct pipe_surface *surf = NULL;
   pipe_surface_reference(&surf, &surface->base);
   batch->has_work = true;
}

void
zink_batch_reference_sampler_view(struct zink_batch *batch,
                                  struct zink_sampler_view *sv)
{
   if (sv->base.target == PIPE_BUFFER)
      zink_batch_reference_bufferview(batch, sv->buffer_view);
   else
      zink_batch_reference_surface(batch, sv->image_view);
}

void
zink_batch_reference_program(struct zink_batch *batch,
                             struct zink_program *pg)
{
   if (zink_batch_usage_matches(pg->batch_uses, batch->state) ||
       !batch_ptr_add_usage(batch, batch->state->programs, pg))
      return;
   pipe_reference(NULL, &pg->reference);
   zink_batch_usage_set(&pg->batch_uses, batch->state);
   batch->has_work = true;
}

void
zink_batch_reference_image_view(struct zink_batch *batch,
                                struct zink_image_view *image_view)
{
   if (image_view->base.resource->target == PIPE_BUFFER)
      zink_batch_reference_bufferview(batch, image_view->buffer_view);
   else
      zink_batch_reference_surface(batch, image_view->surface);
}

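/* non-blocking completion checks for a batch usage: unflushed usage is never
 * complete, and flushed usage is queried with a zero-timeout wait
 */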
bool
zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;

   return zink_screen_batch_id_wait(screen, u->usage, 0);
}

bool
zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;
   return zink_check_batch_completion(ctx, u->usage, false);
}

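/* block until the batch that recorded this usage has finished: flush it first if it
 * belongs to the current context's unflushed batch, otherwise wait for the owning
 * context to flush it
 */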
void
zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return;
   if (zink_batch_usage_is_unflushed(u)) {
      if (likely(u == &ctx->batch.state->usage))
         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
      else { //multi-context
         mtx_lock(&u->mtx);
         cnd_wait(&u->flush, &u->mtx);
         mtx_unlock(&u->mtx);
      }
   }
   zink_wait_on_batch(ctx, u->usage);
}