1 /*
2  * Copyright 2019 Google LLC
3  * SPDX-License-Identifier: MIT
4  *
5  * based in part on anv and radv which are:
6  * Copyright © 2015 Intel Corporation
7  * Copyright © 2016 Red Hat.
8  * Copyright © 2016 Bas Nieuwenhuizen
9  */
10 
11 #include "vn_instance.h"
12 
13 #include "util/driconf.h"
14 #include "venus-protocol/vn_protocol_driver_info.h"
15 #include "venus-protocol/vn_protocol_driver_instance.h"
16 #include "venus-protocol/vn_protocol_driver_transport.h"
17 
18 #include "vn_icd.h"
19 #include "vn_physical_device.h"
20 #include "vn_renderer.h"
21 
22 #define VN_INSTANCE_LARGE_RING_SIZE (64 * 1024)
23 #define VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD                              \
24    (VN_INSTANCE_LARGE_RING_SIZE / 16)
25 
26 /* this must not exceed 2KiB for the ring to fit in a 4K page */
27 #define VN_INSTANCE_RING_SIZE (2 * 1024)
28 #define VN_INSTANCE_RING_DIRECT_THRESHOLD (VN_INSTANCE_RING_SIZE / 8)
29 
30 /*
31  * Instance extensions add instance-level or physical-device-level
32  * functionalities.  It seems renderer support is either unnecessary or
33  * optional.  We should be able to advertise them or lie about them locally.
34  */
static const struct vk_instance_extension_table
   vn_instance_supported_extensions = {
      /* promoted to VK_VERSION_1_1 */
      .KHR_device_group_creation = true,
      .KHR_external_fence_capabilities = true,
      .KHR_external_memory_capabilities = true,
      .KHR_external_semaphore_capabilities = true,
      .KHR_get_physical_device_properties2 = true,

      /* surface/WSI extensions, only when a WSI platform is compiled in */
#ifdef VN_USE_WSI_PLATFORM
      .KHR_get_surface_capabilities2 = true,
      .KHR_surface = true,
      .KHR_surface_protected_capabilities = true,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
      .KHR_wayland_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
      .KHR_xcb_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
      .KHR_xlib_surface = true,
#endif
   };
59 
/* driconf options parsed into instance->dri_options by vn_CreateInstance;
 * the VK_X11/WSI knobs are presumably consumed by the common WSI code --
 * TODO confirm against the WSI callers
 */
static const driOptionDescription vn_dri_options[] = {
   /* clang-format off */
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT(false)
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
   DRI_CONF_SECTION_END
   DRI_CONF_SECTION_DEBUG
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
   DRI_CONF_SECTION_END
   /* clang-format on */
};
72 
73 static VkResult
vn_instance_init_renderer_versions(struct vn_instance * instance)74 vn_instance_init_renderer_versions(struct vn_instance *instance)
75 {
76    uint32_t instance_version = 0;
77    VkResult result =
78       vn_call_vkEnumerateInstanceVersion(instance, &instance_version);
79    if (result != VK_SUCCESS) {
80       if (VN_DEBUG(INIT))
81          vn_log(instance, "failed to enumerate renderer instance version");
82       return result;
83    }
84 
85    if (instance_version < VN_MIN_RENDERER_VERSION) {
86       if (VN_DEBUG(INIT)) {
87          vn_log(instance, "unsupported renderer instance version %d.%d",
88                 VK_VERSION_MAJOR(instance_version),
89                 VK_VERSION_MINOR(instance_version));
90       }
91       return VK_ERROR_INITIALIZATION_FAILED;
92    }
93 
94    if (VN_DEBUG(INIT)) {
95       vn_log(instance, "renderer instance version %d.%d.%d",
96              VK_VERSION_MAJOR(instance_version),
97              VK_VERSION_MINOR(instance_version),
98              VK_VERSION_PATCH(instance_version));
99    }
100 
101    /* request at least VN_MIN_RENDERER_VERSION internally */
102    instance->renderer_api_version =
103       MAX2(instance->base.base.app_info.api_version, VN_MIN_RENDERER_VERSION);
104 
105    /* instance version for internal use is capped */
106    instance_version = MIN3(instance_version, instance->renderer_api_version,
107                            instance->renderer->info.vk_xml_version);
108    assert(instance_version >= VN_MIN_RENDERER_VERSION);
109 
110    instance->renderer_version = instance_version;
111 
112    return VK_SUCCESS;
113 }
114 
static VkResult
vn_instance_init_ring(struct vn_instance *instance)
{
   /* Set up the shmem-backed command ring shared with the renderer:
    * allocate and map the shmem, initialize the driver-side vn_ring over
    * it, and submit a vkCreateRingMESA so the renderer creates its end.
    * Also initializes the upload encoder and roundtrip state.
    */
   const size_t buf_size = instance->experimental.largeRing
                              ? VN_INSTANCE_LARGE_RING_SIZE
                              : VN_INSTANCE_RING_SIZE;
   /* 32-bit seqno for renderer roundtrips */
   const size_t extra_size = sizeof(uint32_t);
   struct vn_ring_layout layout;
   vn_ring_get_layout(buf_size, extra_size, &layout);

   instance->ring.shmem =
      vn_renderer_shmem_create(instance->renderer, layout.shmem_size);
   if (!instance->ring.shmem) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "failed to allocate/map ring shmem");
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   mtx_init(&instance->ring.mutex, mtx_plain);

   struct vn_ring *ring = &instance->ring.ring;
   vn_ring_init(ring, instance->renderer, &layout,
                instance->ring.shmem->mmap_ptr);

   /* the id sent to the renderer is simply the ring's address */
   instance->ring.id = (uintptr_t)ring;

   const struct VkRingCreateInfoMESA info = {
      .sType = VK_STRUCTURE_TYPE_RING_CREATE_INFO_MESA,
      .resourceId = instance->ring.shmem->res_id,
      .size = layout.shmem_size,
      /* 50,000,000 -- presumably nanoseconds, i.e. 50ms; TODO confirm unit */
      .idleTimeout = 50ull * 1000 * 1000,
      .headOffset = layout.head_offset,
      .tailOffset = layout.tail_offset,
      .statusOffset = layout.status_offset,
      .bufferOffset = layout.buffer_offset,
      .bufferSize = layout.buffer_size,
      .extraOffset = layout.extra_offset,
      .extraSize = layout.extra_size,
   };

   /* encode vkCreateRingMESA locally and submit it outside the ring (the
    * ring does not exist renderer-side yet)
    */
   uint32_t create_ring_data[64];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      create_ring_data, sizeof(create_ring_data));
   vn_encode_vkCreateRingMESA(&local_enc, 0, instance->ring.id, &info);
   vn_renderer_submit_simple(instance->renderer, create_ring_data,
                             vn_cs_encoder_get_len(&local_enc));

   /* shmem-array encoder used to stage large pointer-backed command streams
    * for indirect submission
    */
   vn_cs_encoder_init(&instance->ring.upload, instance,
                      VN_CS_ENCODER_STORAGE_SHMEM_ARRAY, 1 * 1024 * 1024);

   mtx_init(&instance->ring.roundtrip_mutex, mtx_plain);
   instance->ring.roundtrip_next = 1;

   return VK_SUCCESS;
}
171 
172 static struct vn_renderer_shmem *
173 vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
174                                    size_t size,
175                                    void **ptr);
176 
static VkResult
vn_instance_init_experimental_features(struct vn_instance *instance)
{
   /* Query VkVenusExperimentalFeatures100000MESA from the renderer and
    * cache it in instance->experimental.  Runs before the ring exists, so
    * the command goes through vn_renderer_submit_simple_sync rather than
    * the ring.  Returns VK_SUCCESS (with zeroed features) when the
    * renderer does not support the query.
    */
   if (instance->renderer->info.vk_mesa_venus_protocol_spec_version !=
       100000) {
      /* the experimental query is tied to this exact spec version */
      if (VN_DEBUG(INIT))
         vn_log(instance, "renderer supports no experimental features");
      return VK_SUCCESS;
   }

   size_t struct_size = sizeof(instance->experimental);

   /* prepare the reply shmem */
   const size_t reply_size =
      vn_sizeof_vkGetVenusExperimentalFeatureData100000MESA_reply(
         &struct_size, &instance->experimental);
   void *reply_ptr;
   struct vn_renderer_shmem *reply_shmem =
      vn_instance_get_reply_shmem_locked(instance, reply_size, &reply_ptr);
   if (!reply_shmem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* encode the command */
   uint32_t local_data[16];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));
   vn_encode_vkGetVenusExperimentalFeatureData100000MESA(
      &local_enc, VK_COMMAND_GENERATE_REPLY_BIT_EXT, &struct_size,
      &instance->experimental);

   /* submit synchronously; the reply is written into reply_shmem */
   VkResult result = vn_renderer_submit_simple_sync(
      instance->renderer, local_data, vn_cs_encoder_get_len(&local_enc));
   if (result != VK_SUCCESS) {
      vn_renderer_shmem_unref(instance->renderer, reply_shmem);
      return result;
   }

   struct vn_cs_decoder reply_dec =
      VN_CS_DECODER_INITIALIZER(reply_ptr, reply_size);
   vn_decode_vkGetVenusExperimentalFeatureData100000MESA_reply(
      &reply_dec, &struct_size, &instance->experimental);
   vn_renderer_shmem_unref(instance->renderer, reply_shmem);

   if (VN_DEBUG(INIT)) {
      vn_log(instance,
             "VkVenusExperimentalFeatures100000MESA is as below:"
             "\n\tmemoryResourceAllocationSize = %u"
             "\n\tglobalFencing = %u"
             "\n\tlargeRing = %u",
             instance->experimental.memoryResourceAllocationSize,
             instance->experimental.globalFencing,
             instance->experimental.largeRing);
   }

   return VK_SUCCESS;
}
233 
234 static VkResult
vn_instance_init_renderer(struct vn_instance * instance)235 vn_instance_init_renderer(struct vn_instance *instance)
236 {
237    const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
238 
239    VkResult result = vn_renderer_create(instance, alloc, &instance->renderer);
240    if (result != VK_SUCCESS)
241       return result;
242 
243    struct vn_renderer_info *renderer_info = &instance->renderer->info;
244    uint32_t version = vn_info_wire_format_version();
245    if (renderer_info->wire_format_version != version) {
246       if (VN_DEBUG(INIT)) {
247          vn_log(instance, "wire format version %d != %d",
248                 renderer_info->wire_format_version, version);
249       }
250       return VK_ERROR_INITIALIZATION_FAILED;
251    }
252 
253    version = vn_info_vk_xml_version();
254    if (renderer_info->vk_xml_version > version)
255       renderer_info->vk_xml_version = version;
256    if (renderer_info->vk_xml_version < VN_MIN_RENDERER_VERSION) {
257       if (VN_DEBUG(INIT)) {
258          vn_log(instance, "vk xml version %d.%d.%d < %d.%d.%d",
259                 VK_VERSION_MAJOR(renderer_info->vk_xml_version),
260                 VK_VERSION_MINOR(renderer_info->vk_xml_version),
261                 VK_VERSION_PATCH(renderer_info->vk_xml_version),
262                 VK_VERSION_MAJOR(VN_MIN_RENDERER_VERSION),
263                 VK_VERSION_MINOR(VN_MIN_RENDERER_VERSION),
264                 VK_VERSION_PATCH(VN_MIN_RENDERER_VERSION));
265       }
266       return VK_ERROR_INITIALIZATION_FAILED;
267    }
268 
269    uint32_t spec_version =
270       vn_extension_get_spec_version("VK_EXT_command_serialization");
271    if (renderer_info->vk_ext_command_serialization_spec_version >
272        spec_version) {
273       renderer_info->vk_ext_command_serialization_spec_version = spec_version;
274    }
275 
276    spec_version = vn_extension_get_spec_version("VK_MESA_venus_protocol");
277    if (renderer_info->vk_mesa_venus_protocol_spec_version > spec_version)
278       renderer_info->vk_mesa_venus_protocol_spec_version = spec_version;
279 
280    if (VN_DEBUG(INIT)) {
281       vn_log(instance, "connected to renderer");
282       vn_log(instance, "wire format version %d",
283              renderer_info->wire_format_version);
284       vn_log(instance, "vk xml version %d.%d.%d",
285              VK_VERSION_MAJOR(renderer_info->vk_xml_version),
286              VK_VERSION_MINOR(renderer_info->vk_xml_version),
287              VK_VERSION_PATCH(renderer_info->vk_xml_version));
288       vn_log(instance, "VK_EXT_command_serialization spec version %d",
289              renderer_info->vk_ext_command_serialization_spec_version);
290       vn_log(instance, "VK_MESA_venus_protocol spec version %d",
291              renderer_info->vk_mesa_venus_protocol_spec_version);
292       vn_log(instance, "supports blob id 0: %d",
293              renderer_info->supports_blob_id_0);
294    }
295 
296    return VK_SUCCESS;
297 }
298 
VkResult
vn_instance_submit_roundtrip(struct vn_instance *instance,
                             uint32_t *roundtrip_seqno)
{
   /* Allocate the next roundtrip seqno and ask the renderer to write it
    * into the ring's extra region.  vn_instance_wait_roundtrip polls the
    * extra region for that value.  The seqno is always returned in
    * *roundtrip_seqno, even when submission fails.
    */
   uint32_t write_ring_extra_data[8];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      write_ring_extra_data, sizeof(write_ring_extra_data));

   /* submit a vkWriteRingExtraMESA through the renderer */
   mtx_lock(&instance->ring.roundtrip_mutex);
   const uint32_t seqno = instance->ring.roundtrip_next++;
   vn_encode_vkWriteRingExtraMESA(&local_enc, 0, instance->ring.id, 0, seqno);
   VkResult result =
      vn_renderer_submit_simple(instance->renderer, write_ring_extra_data,
                                vn_cs_encoder_get_len(&local_enc));
   mtx_unlock(&instance->ring.roundtrip_mutex);

   *roundtrip_seqno = seqno;
   return result;
}
319 
/* Wrap-safe "a >= b" for 32-bit roundtrip seqnos: true when a is at or
 * after b in modular arithmetic, i.e. the forward distance from b to a is
 * at most INT32_MAX.
 */
static bool
roundtrip_seqno_ge(uint32_t a, uint32_t b)
{
   const uint32_t distance = a - b;
   return distance <= INT32_MAX;
}
326 
void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint32_t roundtrip_seqno)
{
   /* Busy-wait (with vn_relax backoff) until the seqno stored in the
    * ring's extra region reaches roundtrip_seqno; wrap-safe via
    * roundtrip_seqno_ge.
    */
   VN_TRACE_FUNC();
   const struct vn_ring *ring = &instance->ring.ring;
   const volatile atomic_uint *ptr = ring->shared.extra;
   uint32_t iter = 0;
   do {
      /* acquire: loads after the wait observe renderer writes made before
       * it bumped the seqno
       */
      const uint32_t cur = atomic_load_explicit(ptr, memory_order_acquire);
      if (roundtrip_seqno_ge(cur, roundtrip_seqno))
         break;
      vn_relax(&iter, "roundtrip");
   } while (true);
}
342 
/* One ring submission: `cs` is either the caller's encoder (direct path)
 * or `&indirect.cs` holding an encoded vkExecuteCommandStreamsMESA that
 * references the caller's cs buffers by resource id (indirect path).
 */
struct vn_instance_submission {
   const struct vn_cs_encoder *cs;
   struct vn_ring_submit *submit;

   /* indirect-path storage; data[] is used unless the encoded command is
    * too large, in which case buffer.base is malloc'ed and freed by
    * vn_instance_submission_cleanup
    */
   struct {
      struct vn_cs_encoder cs;
      struct vn_cs_encoder_buffer buffer;
      uint32_t data[64];
   } indirect;
};
353 
static const struct vn_cs_encoder *
vn_instance_submission_get_cs(struct vn_instance_submission *submit,
                              const struct vn_cs_encoder *cs,
                              bool direct)
{
   /* Pick the cs to place on the ring.  Direct submissions use the
    * caller's cs as-is.  Otherwise encode a vkExecuteCommandStreamsMESA
    * referencing the committed cs buffers by shmem resource id into
    * submit->indirect, and return that.  Returns NULL on allocation
    * failure.
    */
   if (direct)
      return cs;

   /* build one stream description per committed buffer; spill to the heap
    * when there are more than fit in local_descs
    */
   VkCommandStreamDescriptionMESA local_descs[8];
   VkCommandStreamDescriptionMESA *descs = local_descs;
   if (cs->buffer_count > ARRAY_SIZE(local_descs)) {
      descs =
         malloc(sizeof(VkCommandStreamDescriptionMESA) * cs->buffer_count);
      if (!descs)
         return NULL;
   }

   uint32_t desc_count = 0;
   for (uint32_t i = 0; i < cs->buffer_count; i++) {
      const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
      if (buf->committed_size) {
         descs[desc_count++] = (VkCommandStreamDescriptionMESA){
            .resourceId = buf->shmem->res_id,
            .offset = buf->offset,
            .size = buf->committed_size,
         };
      }
   }

   /* encode into the inline array, or a heap buffer when it does not fit;
    * a heap buffer is freed later by vn_instance_submission_cleanup
    */
   const size_t exec_size = vn_sizeof_vkExecuteCommandStreamsMESA(
      desc_count, descs, NULL, 0, NULL, 0);
   void *exec_data = submit->indirect.data;
   if (exec_size > sizeof(submit->indirect.data)) {
      exec_data = malloc(exec_size);
      if (!exec_data) {
         if (descs != local_descs)
            free(descs);
         return NULL;
      }
   }

   submit->indirect.buffer = VN_CS_ENCODER_BUFFER_INITIALIZER(exec_data);
   submit->indirect.cs =
      VN_CS_ENCODER_INITIALIZER(&submit->indirect.buffer, exec_size);
   vn_encode_vkExecuteCommandStreamsMESA(&submit->indirect.cs, 0, desc_count,
                                         descs, NULL, 0, NULL, 0);
   vn_cs_encoder_commit(&submit->indirect.cs);

   /* descs are fully encoded now; only exec_data must outlive this call */
   if (descs != local_descs)
      free(descs);

   return &submit->indirect.cs;
}
407 
static struct vn_ring_submit *
vn_instance_submission_get_ring_submit(struct vn_ring *ring,
                                       const struct vn_cs_encoder *cs,
                                       struct vn_renderer_shmem *extra_shmem,
                                       bool direct)
{
   /* Build a vn_ring_submit holding references on every shmem involved:
    * the cs buffers (indirect path only, the direct path copies data into
    * the ring) plus the optional extra_shmem (e.g. the reply shmem passed
    * by vn_instance_submit_command).  Returns NULL on failure.
    */
   const uint32_t shmem_count =
      (direct ? 0 : cs->buffer_count) + (extra_shmem ? 1 : 0);
   struct vn_ring_submit *submit = vn_ring_get_submit(ring, shmem_count);
   if (!submit)
      return NULL;

   submit->shmem_count = shmem_count;
   if (!direct) {
      for (uint32_t i = 0; i < cs->buffer_count; i++) {
         submit->shmems[i] =
            vn_renderer_shmem_ref(ring->renderer, cs->buffers[i].shmem);
      }
   }
   if (extra_shmem) {
      /* extra_shmem always occupies the last slot */
      submit->shmems[shmem_count - 1] =
         vn_renderer_shmem_ref(ring->renderer, extra_shmem);
   }

   return submit;
}
434 
435 static void
vn_instance_submission_cleanup(struct vn_instance_submission * submit)436 vn_instance_submission_cleanup(struct vn_instance_submission *submit)
437 {
438    if (submit->cs == &submit->indirect.cs &&
439        submit->indirect.buffer.base != submit->indirect.data)
440       free(submit->indirect.buffer.base);
441 }
442 
443 static VkResult
vn_instance_submission_prepare(struct vn_instance_submission * submit,const struct vn_cs_encoder * cs,struct vn_ring * ring,struct vn_renderer_shmem * extra_shmem,bool direct)444 vn_instance_submission_prepare(struct vn_instance_submission *submit,
445                                const struct vn_cs_encoder *cs,
446                                struct vn_ring *ring,
447                                struct vn_renderer_shmem *extra_shmem,
448                                bool direct)
449 {
450    submit->cs = vn_instance_submission_get_cs(submit, cs, direct);
451    if (!submit->cs)
452       return VK_ERROR_OUT_OF_HOST_MEMORY;
453 
454    submit->submit =
455       vn_instance_submission_get_ring_submit(ring, cs, extra_shmem, direct);
456    if (!submit->submit) {
457       vn_instance_submission_cleanup(submit);
458       return VK_ERROR_OUT_OF_HOST_MEMORY;
459    }
460 
461    return VK_SUCCESS;
462 }
463 
464 static bool
vn_instance_submission_can_direct(const struct vn_instance * instance,const struct vn_cs_encoder * cs)465 vn_instance_submission_can_direct(const struct vn_instance *instance,
466                                   const struct vn_cs_encoder *cs)
467 {
468    const size_t threshold = instance->experimental.largeRing
469                                ? VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD
470                                : VN_INSTANCE_RING_DIRECT_THRESHOLD;
471    return vn_cs_encoder_get_len(cs) <= threshold;
472 }
473 
static struct vn_cs_encoder *
vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
                                  const struct vn_cs_encoder *cs)
{
   /* Copy a single-buffer, pointer-backed cs into the shmem-backed upload
    * encoder so it can be submitted indirectly (pointer storage has no
    * shmem resource id).  Returns NULL on allocation failure.  Caller
    * must hold instance->ring.mutex.
    */
   VN_TRACE_FUNC();
   assert(cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER &&
          cs->buffer_count == 1);
   const void *cs_data = cs->buffers[0].base;
   const size_t cs_size = cs->total_committed_size;
   assert(cs_size == vn_cs_encoder_get_len(cs));

   struct vn_cs_encoder *upload = &instance->ring.upload;
   vn_cs_encoder_reset(upload);

   if (!vn_cs_encoder_reserve(upload, cs_size))
      return NULL;

   vn_cs_encoder_write(upload, cs_size, cs_data, cs_size);
   vn_cs_encoder_commit(upload);

   /* NOTE(review): presumably ensures the renderer has attached the upload
    * shmem before it gets referenced by resource id when blob id 0 is not
    * supported -- confirm against vn_cs_encoder internals
    */
   if (unlikely(!instance->renderer->info.supports_blob_id_0))
      vn_instance_wait_roundtrip(instance, upload->current_buffer_roundtrip);

   return upload;
}
499 
static VkResult
vn_instance_ring_submit_locked(struct vn_instance *instance,
                               const struct vn_cs_encoder *cs,
                               struct vn_renderer_shmem *extra_shmem,
                               uint32_t *ring_seqno)
{
   /* Submit cs on the ring: directly when small enough, otherwise
    * indirectly (staging pointer-backed storage into the upload encoder
    * first).  Optionally returns the ring seqno so the caller can wait
    * for completion.  Caller must hold instance->ring.mutex.
    */
   struct vn_ring *ring = &instance->ring.ring;

   const bool direct = vn_instance_submission_can_direct(instance, cs);
   if (!direct && cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER) {
      /* pointer storage has no shmem to reference indirectly */
      cs = vn_instance_ring_cs_upload_locked(instance, cs);
      if (!cs)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      assert(cs->storage_type != VN_CS_ENCODER_STORAGE_POINTER);
   }

   struct vn_instance_submission submit;
   VkResult result =
      vn_instance_submission_prepare(&submit, cs, ring, extra_shmem, direct);
   if (result != VK_SUCCESS)
      return result;

   uint32_t seqno;
   const bool notify = vn_ring_submit(ring, submit.submit, submit.cs, &seqno);
   if (notify) {
      /* vn_ring_submit asked for a kick -- send vkNotifyRingMESA outside
       * the ring (presumably the renderer-side ring is idle; confirm
       * against vn_ring_submit)
       */
      uint32_t notify_ring_data[8];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         notify_ring_data, sizeof(notify_ring_data));
      vn_encode_vkNotifyRingMESA(&local_enc, 0, instance->ring.id, seqno, 0);
      vn_renderer_submit_simple(instance->renderer, notify_ring_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   vn_instance_submission_cleanup(&submit);

   if (ring_seqno)
      *ring_seqno = seqno;

   return VK_SUCCESS;
}
540 
541 VkResult
vn_instance_ring_submit(struct vn_instance * instance,const struct vn_cs_encoder * cs)542 vn_instance_ring_submit(struct vn_instance *instance,
543                         const struct vn_cs_encoder *cs)
544 {
545    mtx_lock(&instance->ring.mutex);
546    VkResult result = vn_instance_ring_submit_locked(instance, cs, NULL, NULL);
547    mtx_unlock(&instance->ring.mutex);
548 
549    return result;
550 }
551 
static struct vn_renderer_shmem *
vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
                                   size_t size,
                                   void **out_ptr)
{
   /* Suballocate `size` bytes for a command reply out of the reply shmem
    * pool; store the mapped pointer in *out_ptr.  When the pool rolls over
    * to a new shmem, point the renderer's reply stream at it with
    * vkSetReplyCommandStreamMESA, then always seek the stream to the
    * suballocation offset.  Callers release the returned shmem with
    * vn_renderer_shmem_unref (see vn_instance_init_experimental_features).
    * Returns NULL on allocation failure.  Caller must hold ring.mutex
    * once the ring exists.
    */
   VN_TRACE_FUNC();
   struct vn_renderer_shmem_pool *pool = &instance->reply_shmem_pool;
   const struct vn_renderer_shmem *saved_pool_shmem = pool->shmem;

   size_t offset;
   struct vn_renderer_shmem *shmem =
      vn_renderer_shmem_pool_alloc(instance->renderer, pool, size, &offset);
   if (!shmem)
      return NULL;

   assert(shmem == pool->shmem);
   *out_ptr = shmem->mmap_ptr + offset;

   /* the pool switched to a new shmem: tell the renderer about it */
   if (shmem != saved_pool_shmem) {
      uint32_t set_reply_command_stream_data[16];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         set_reply_command_stream_data,
         sizeof(set_reply_command_stream_data));
      const struct VkCommandStreamDescriptionMESA stream = {
         .resourceId = shmem->res_id,
         .size = pool->size,
      };
      vn_encode_vkSetReplyCommandStreamMESA(&local_enc, 0, &stream);
      vn_cs_encoder_commit(&local_enc);

      /* vn_instance_init_experimental_features calls this before the ring is
       * created
       */
      if (likely(instance->ring.id)) {
         /* NOTE(review): presumably ensures the renderer has attached the
          * new shmem before it is referenced -- confirm
          */
         if (unlikely(!instance->renderer->info.supports_blob_id_0))
            vn_instance_roundtrip(instance);

         vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
      } else {
         vn_renderer_submit_simple(instance->renderer,
                                   set_reply_command_stream_data,
                                   vn_cs_encoder_get_len(&local_enc));
      }
   }

   /* TODO avoid this seek command and go lock-free? */
   uint32_t seek_reply_command_stream_data[8];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      seek_reply_command_stream_data, sizeof(seek_reply_command_stream_data));
   vn_encode_vkSeekReplyCommandStreamMESA(&local_enc, 0, offset);
   vn_cs_encoder_commit(&local_enc);

   /* vn_instance_init_experimental_features calls this before the ring is
    * created
    */
   if (likely(instance->ring.id)) {
      vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
   } else {
      vn_renderer_submit_simple(instance->renderer,
                                seek_reply_command_stream_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   return shmem;
}
617 
void
vn_instance_submit_command(struct vn_instance *instance,
                           struct vn_instance_submit_command *submit)
{
   /* Submit submit->command on the ring.  When submit->reply_size is set,
    * allocate a reply shmem, wait for the ring to reach the submission's
    * seqno, and expose the reply through submit->reply.  On the fail path
    * submit->reply_shmem is left NULL, ring.command_dropped is bumped,
    * and submit->reply is NOT initialized.
    */
   void *reply_ptr = NULL;
   submit->reply_shmem = NULL;

   mtx_lock(&instance->ring.mutex);

   if (vn_cs_encoder_is_empty(&submit->command))
      goto fail;
   vn_cs_encoder_commit(&submit->command);

   if (submit->reply_size) {
      submit->reply_shmem = vn_instance_get_reply_shmem_locked(
         instance, submit->reply_size, &reply_ptr);
      if (!submit->reply_shmem)
         goto fail;
   }

   uint32_t ring_seqno;
   VkResult result = vn_instance_ring_submit_locked(
      instance, &submit->command, submit->reply_shmem, &ring_seqno);

   mtx_unlock(&instance->ring.mutex);

   submit->reply = VN_CS_DECODER_INITIALIZER(reply_ptr, submit->reply_size);

   /* wait only when a reply is expected and the submission reached the
    * ring; otherwise submit->reply decodes from a NULL/zero stream
    */
   if (submit->reply_size && result == VK_SUCCESS)
      vn_ring_wait(&instance->ring.ring, ring_seqno);

   return;

fail:
   instance->ring.command_dropped++;
   mtx_unlock(&instance->ring.mutex);
}
655 
656 /* instance commands */
657 
658 VkResult
vn_EnumerateInstanceVersion(uint32_t * pApiVersion)659 vn_EnumerateInstanceVersion(uint32_t *pApiVersion)
660 {
661    *pApiVersion = VN_MAX_API_VERSION;
662    return VK_SUCCESS;
663 }
664 
665 VkResult
vn_EnumerateInstanceExtensionProperties(const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)666 vn_EnumerateInstanceExtensionProperties(const char *pLayerName,
667                                         uint32_t *pPropertyCount,
668                                         VkExtensionProperties *pProperties)
669 {
670    if (pLayerName)
671       return vn_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
672 
673    return vk_enumerate_instance_extension_properties(
674       &vn_instance_supported_extensions, pPropertyCount, pProperties);
675 }
676 
677 VkResult
vn_EnumerateInstanceLayerProperties(uint32_t * pPropertyCount,VkLayerProperties * pProperties)678 vn_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
679                                     VkLayerProperties *pProperties)
680 {
681    *pPropertyCount = 0;
682    return VK_SUCCESS;
683 }
684 
685 VkResult
vn_CreateInstance(const VkInstanceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkInstance * pInstance)686 vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
687                   const VkAllocationCallbacks *pAllocator,
688                   VkInstance *pInstance)
689 {
690    const VkAllocationCallbacks *alloc =
691       pAllocator ? pAllocator : vk_default_allocator();
692    struct vn_instance *instance;
693    VkResult result;
694 
695    vn_debug_init();
696    vn_trace_init();
697 
698    instance = vk_zalloc(alloc, sizeof(*instance), VN_DEFAULT_ALIGN,
699                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
700    if (!instance)
701       return vn_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
702 
703    struct vk_instance_dispatch_table dispatch_table;
704    vk_instance_dispatch_table_from_entrypoints(
705       &dispatch_table, &vn_instance_entrypoints, true);
706    vk_instance_dispatch_table_from_entrypoints(
707       &dispatch_table, &wsi_instance_entrypoints, false);
708    result = vn_instance_base_init(&instance->base,
709                                   &vn_instance_supported_extensions,
710                                   &dispatch_table, pCreateInfo, alloc);
711    if (result != VK_SUCCESS) {
712       vk_free(alloc, instance);
713       return vn_error(NULL, result);
714    }
715 
716    mtx_init(&instance->physical_device.mutex, mtx_plain);
717    mtx_init(&instance->cs_shmem.mutex, mtx_plain);
718 
719    if (!vn_icd_supports_api_version(
720           instance->base.base.app_info.api_version)) {
721       result = VK_ERROR_INCOMPATIBLE_DRIVER;
722       goto fail;
723    }
724 
725    if (pCreateInfo->enabledLayerCount) {
726       result = VK_ERROR_LAYER_NOT_PRESENT;
727       goto fail;
728    }
729 
730    result = vn_instance_init_renderer(instance);
731    if (result != VK_SUCCESS)
732       goto fail;
733 
734    vn_renderer_shmem_pool_init(instance->renderer,
735                                &instance->reply_shmem_pool, 1u << 20);
736 
737    result = vn_instance_init_experimental_features(instance);
738    if (result != VK_SUCCESS)
739       goto fail;
740 
741    result = vn_instance_init_ring(instance);
742    if (result != VK_SUCCESS)
743       goto fail;
744 
745    result = vn_instance_init_renderer_versions(instance);
746    if (result != VK_SUCCESS)
747       goto fail;
748 
749    vn_renderer_shmem_pool_init(instance->renderer, &instance->cs_shmem.pool,
750                                8u << 20);
751 
752    VkInstanceCreateInfo local_create_info = *pCreateInfo;
753    local_create_info.ppEnabledExtensionNames = NULL;
754    local_create_info.enabledExtensionCount = 0;
755    pCreateInfo = &local_create_info;
756 
757    VkApplicationInfo local_app_info;
758    if (instance->base.base.app_info.api_version <
759        instance->renderer_api_version) {
760       if (pCreateInfo->pApplicationInfo) {
761          local_app_info = *pCreateInfo->pApplicationInfo;
762          local_app_info.apiVersion = instance->renderer_api_version;
763       } else {
764          local_app_info = (const VkApplicationInfo){
765             .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
766             .apiVersion = instance->renderer_api_version,
767          };
768       }
769       local_create_info.pApplicationInfo = &local_app_info;
770    }
771 
772    VkInstance instance_handle = vn_instance_to_handle(instance);
773    result =
774       vn_call_vkCreateInstance(instance, pCreateInfo, NULL, &instance_handle);
775    if (result != VK_SUCCESS)
776       goto fail;
777 
778    driParseOptionInfo(&instance->available_dri_options, vn_dri_options,
779                       ARRAY_SIZE(vn_dri_options));
780    driParseConfigFiles(&instance->dri_options,
781                        &instance->available_dri_options, 0, "venus", NULL,
782                        NULL, instance->base.base.app_info.app_name,
783                        instance->base.base.app_info.app_version,
784                        instance->base.base.app_info.engine_name,
785                        instance->base.base.app_info.engine_version);
786 
787    *pInstance = instance_handle;
788 
789    return VK_SUCCESS;
790 
791 fail:
792    if (instance->ring.shmem) {
793       uint32_t destroy_ring_data[4];
794       struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
795          destroy_ring_data, sizeof(destroy_ring_data));
796       vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
797       vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
798                                 vn_cs_encoder_get_len(&local_enc));
799 
800       mtx_destroy(&instance->ring.roundtrip_mutex);
801       vn_cs_encoder_fini(&instance->ring.upload);
802       vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
803       vn_ring_fini(&instance->ring.ring);
804       mtx_destroy(&instance->ring.mutex);
805    }
806 
807    vn_renderer_shmem_pool_fini(instance->renderer,
808                                &instance->reply_shmem_pool);
809 
810    if (instance->renderer)
811       vn_renderer_destroy(instance->renderer, alloc);
812 
813    mtx_destroy(&instance->physical_device.mutex);
814    mtx_destroy(&instance->cs_shmem.mutex);
815 
816    vn_instance_base_fini(&instance->base);
817    vk_free(alloc, instance);
818 
819    return vn_error(NULL, result);
820 }
821 
822 void
vn_DestroyInstance(VkInstance _instance,const VkAllocationCallbacks * pAllocator)823 vn_DestroyInstance(VkInstance _instance,
824                    const VkAllocationCallbacks *pAllocator)
825 {
826    struct vn_instance *instance = vn_instance_from_handle(_instance);
827    const VkAllocationCallbacks *alloc =
828       pAllocator ? pAllocator : &instance->base.base.alloc;
829 
830    if (!instance)
831       return;
832 
833    if (instance->physical_device.initialized) {
834       for (uint32_t i = 0; i < instance->physical_device.device_count; i++)
835          vn_physical_device_fini(&instance->physical_device.devices[i]);
836       vk_free(alloc, instance->physical_device.devices);
837       vk_free(alloc, instance->physical_device.groups);
838    }
839    mtx_destroy(&instance->physical_device.mutex);
840 
841    vn_call_vkDestroyInstance(instance, _instance, NULL);
842 
843    vn_renderer_shmem_pool_fini(instance->renderer, &instance->cs_shmem.pool);
844    mtx_destroy(&instance->cs_shmem.mutex);
845 
846    uint32_t destroy_ring_data[4];
847    struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
848       destroy_ring_data, sizeof(destroy_ring_data));
849    vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
850    vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
851                              vn_cs_encoder_get_len(&local_enc));
852 
853    mtx_destroy(&instance->ring.roundtrip_mutex);
854    vn_cs_encoder_fini(&instance->ring.upload);
855    vn_ring_fini(&instance->ring.ring);
856    mtx_destroy(&instance->ring.mutex);
857    vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
858 
859    vn_renderer_shmem_pool_fini(instance->renderer,
860                                &instance->reply_shmem_pool);
861 
862    vn_renderer_destroy(instance->renderer, alloc);
863 
864    driDestroyOptionCache(&instance->dri_options);
865    driDestroyOptionInfo(&instance->available_dri_options);
866 
867    vn_instance_base_fini(&instance->base);
868    vk_free(alloc, instance);
869 }
870 
871 PFN_vkVoidFunction
vn_GetInstanceProcAddr(VkInstance _instance,const char * pName)872 vn_GetInstanceProcAddr(VkInstance _instance, const char *pName)
873 {
874    struct vn_instance *instance = vn_instance_from_handle(_instance);
875    return vk_instance_get_proc_addr(&instance->base.base,
876                                     &vn_instance_entrypoints, pName);
877 }
878