1 /*
2  * Copyright 2019 Google LLC
3  * SPDX-License-Identifier: MIT
4  *
5  * based in part on anv and radv which are:
6  * Copyright © 2015 Intel Corporation
7  * Copyright © 2016 Red Hat.
8  * Copyright © 2016 Bas Nieuwenhuizen
9  */
10 
11 #include "vn_device_memory.h"
12 
13 #include "venus-protocol/vn_protocol_driver_device_memory.h"
14 #include "venus-protocol/vn_protocol_driver_transport.h"
15 
16 #include "vn_android.h"
17 #include "vn_buffer.h"
18 #include "vn_device.h"
19 #include "vn_image.h"
20 #include "vn_physical_device.h"
21 
22 /* device memory commands */
23 
/* Allocate a standalone VkDeviceMemory of \p size from \p mem_type_index and
 * back it with a renderer bo.  Used to allocate the blocks that the per-type
 * suballocation pool carves up.  On success the new memory, with base_bo
 * set, is returned in \p out_mem.
 */
static VkResult
vn_device_memory_simple_alloc(struct vn_device *dev,
                              uint32_t mem_type_index,
                              VkDeviceSize size,
                              struct vn_device_memory **out_mem)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   struct vn_device_memory *mem =
      vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!mem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
   mem->size = size;

   /* create the renderer-side memory object first; the bo below is created
    * from it by object id
    */
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   VkResult result = vn_call_vkAllocateMemory(
      dev->instance, vn_device_to_handle(dev),
      &(const VkMemoryAllocateInfo){
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = size,
         .memoryTypeIndex = mem_type_index,
      },
      NULL, &mem_handle);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&mem->base);
      vk_free(alloc, mem);
      return result;
   }

   /* back the memory with a renderer bo; the memory type's property flags
    * tell the renderer how the bo will be used (e.g. mappable)
    */
   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   const VkMemoryType *mem_type = &mem_props->memoryTypes[mem_type_index];
   result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem->size, mem->base.id, mem_type->propertyFlags, 0,
      &mem->base_bo);
   if (result != VK_SUCCESS) {
      vn_async_vkFreeMemory(dev->instance, vn_device_to_handle(dev),
                            mem_handle, NULL);
      vn_object_base_fini(&mem->base);
      vk_free(alloc, mem);
      return result;
   }
   /* NOTE(review): presumably waits for the renderer to process the bo
    * creation before the bo is used — confirm against vn_renderer contract
    */
   vn_instance_roundtrip(dev->instance);

   *out_mem = mem;

   return VK_SUCCESS;
}
75 
76 static void
vn_device_memory_simple_free(struct vn_device * dev,struct vn_device_memory * mem)77 vn_device_memory_simple_free(struct vn_device *dev,
78                              struct vn_device_memory *mem)
79 {
80    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
81 
82    if (mem->base_bo)
83       vn_renderer_bo_unref(dev->renderer, mem->base_bo);
84 
85    vn_async_vkFreeMemory(dev->instance, vn_device_to_handle(dev),
86                          vn_device_memory_to_handle(mem), NULL);
87    vn_object_base_fini(&mem->base);
88    vk_free(alloc, mem);
89 }
90 
91 void
vn_device_memory_pool_fini(struct vn_device * dev,uint32_t mem_type_index)92 vn_device_memory_pool_fini(struct vn_device *dev, uint32_t mem_type_index)
93 {
94    struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
95    if (pool->memory)
96       vn_device_memory_simple_free(dev, pool->memory);
97    mtx_destroy(&pool->mutex);
98 }
99 
/* Replace the pool's current block with a freshly allocated block of
 * \p size.  Must be called with the pool mutex held.
 */
static VkResult
vn_device_memory_pool_grow_locked(struct vn_device *dev,
                                  uint32_t mem_type_index,
                                  VkDeviceSize size)
{
   struct vn_device_memory *mem;
   VkResult result =
      vn_device_memory_simple_alloc(dev, mem_type_index, size, &mem);
   if (result != VK_SUCCESS)
      return result;

   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory) {
      /* drop the pool's own bo reference to the old block; outstanding
       * suballocations each hold their own reference taken in
       * vn_device_memory_pool_alloc
       */
      const bool bo_destroyed =
         vn_renderer_bo_unref(dev->renderer, pool->memory->base_bo);
      pool->memory->base_bo = NULL;

      /* we use pool->memory's base_bo to keep it alive */
      if (bo_destroyed)
         vn_device_memory_simple_free(dev, pool->memory);
   }

   pool->memory = mem;
   pool->used = 0;

   return VK_SUCCESS;
}
127 
/* Suballocate \p size bytes of memory type \p mem_type_index from the
 * per-type pool.  Returns the pool's block in \p base_mem, a new reference
 * to the block's bo in \p base_bo (this reference is what keeps \p base_mem
 * alive; see vn_device_memory_pool_free), and the suballocation's offset
 * within the block in \p base_offset.
 */
static VkResult
vn_device_memory_pool_alloc(struct vn_device *dev,
                            uint32_t mem_type_index,
                            VkDeviceSize size,
                            struct vn_device_memory **base_mem,
                            struct vn_renderer_bo **base_bo,
                            VkDeviceSize *base_offset)
{
   /* each pool block is 16 MiB */
   const VkDeviceSize pool_size = 16 * 1024 * 1024;
   /* XXX We don't know the alignment requirement.  We should probably use 64K
    * because some GPUs have 64K pages.
    */
   const VkDeviceSize pool_align = 4096;
   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];

   /* callers must pre-filter larger requests (see
    * vn_device_memory_should_suballocate)
    */
   assert(size <= pool_size);

   mtx_lock(&pool->mutex);

   /* grow on first use or when the current block cannot fit the request */
   if (!pool->memory || pool->used + size > pool_size) {
      VkResult result =
         vn_device_memory_pool_grow_locked(dev, mem_type_index, pool_size);
      if (result != VK_SUCCESS) {
         mtx_unlock(&pool->mutex);
         return result;
      }
   }

   /* we use base_bo to keep base_mem alive */
   *base_mem = pool->memory;
   *base_bo = vn_renderer_bo_ref(dev->renderer, pool->memory->base_bo);

   *base_offset = pool->used;
   pool->used += align64(size, pool_align);

   mtx_unlock(&pool->mutex);

   return VK_SUCCESS;
}
167 
168 static void
vn_device_memory_pool_free(struct vn_device * dev,struct vn_device_memory * base_mem,struct vn_renderer_bo * base_bo)169 vn_device_memory_pool_free(struct vn_device *dev,
170                            struct vn_device_memory *base_mem,
171                            struct vn_renderer_bo *base_bo)
172 {
173    /* we use base_bo to keep base_mem alive */
174    if (vn_renderer_bo_unref(dev->renderer, base_bo))
175       vn_device_memory_simple_free(dev, base_mem);
176 }
177 
178 static bool
vn_device_memory_should_suballocate(const VkMemoryAllocateInfo * alloc_info,const VkMemoryType * mem_type)179 vn_device_memory_should_suballocate(const VkMemoryAllocateInfo *alloc_info,
180                                     const VkMemoryType *mem_type)
181 {
182    /* We should not support suballocations because apps can do better.  But
183     * each BO takes up a KVM memslot currently and some CTS tests exhausts
184     * them.  This might not be needed on newer (host) kernels where there are
185     * many more KVM memslots.
186     */
187 
188    /* consider host-visible memory only */
189    if (!(mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
190       return false;
191 
192    /* reject larger allocations */
193    if (alloc_info->allocationSize > 64 * 1024)
194       return false;
195 
196    /* reject if there is any pnext struct other than
197     * VkMemoryDedicatedAllocateInfo, or if dedicated allocation is required
198     */
199    if (alloc_info->pNext) {
200       const VkMemoryDedicatedAllocateInfo *dedicated = alloc_info->pNext;
201       if (dedicated->sType !=
202              VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO ||
203           dedicated->pNext)
204          return false;
205 
206       const struct vn_image *img = vn_image_from_handle(dedicated->image);
207       if (img) {
208          for (uint32_t i = 0; i < ARRAY_SIZE(img->dedicated_requirements);
209               i++) {
210             if (img->dedicated_requirements[i].requiresDedicatedAllocation)
211                return false;
212          }
213       }
214 
215       const struct vn_buffer *buf = vn_buffer_from_handle(dedicated->buffer);
216       if (buf && buf->dedicated_requirements.requiresDedicatedAllocation)
217          return false;
218    }
219 
220    return true;
221 }
222 
/* Import dma-buf \p fd as the backing of \p mem.  On success the fd is
 * consumed (closed here); on failure the fd is untouched and ownership
 * stays with the caller.  \p force_unmappable strips host visibility so
 * the renderer does not map the bo.
 */
VkResult
vn_device_memory_import_dma_buf(struct vn_device *dev,
                                struct vn_device_memory *mem,
                                const VkMemoryAllocateInfo *alloc_info,
                                bool force_unmappable,
                                int fd)
{
   VkDevice device = vn_device_to_handle(dev);
   VkDeviceMemory memory = vn_device_memory_to_handle(mem);
   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   VkMemoryPropertyFlags mem_flags =
      mem_props->memoryTypes[alloc_info->memoryTypeIndex].propertyFlags;
   struct vn_renderer_bo *bo;
   VkResult result = VK_SUCCESS;

   if (force_unmappable)
      mem_flags &= ~VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

   result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, alloc_info->allocationSize, fd, mem_flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   /* NOTE(review): presumably ensures the renderer has resolved the bo's
    * res_id before it is referenced below — confirm against vn_renderer
    */
   vn_instance_roundtrip(dev->instance);

   /* XXX fix VkImportMemoryResourceInfoMESA to support memory planes */
   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = bo->res_id,
   };
   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };
   result = vn_call_vkAllocateMemory(dev->instance, device,
                                     &memory_allocate_info, NULL, &memory);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, bo);
      return result;
   }

   /* need to close import fd on success to avoid fd leak */
   close(fd);
   mem->base_bo = bo;

   return VK_SUCCESS;
}
274 
275 static VkResult
vn_device_memory_alloc(struct vn_device * dev,struct vn_device_memory * mem,const VkMemoryAllocateInfo * alloc_info,bool need_bo,VkMemoryPropertyFlags flags,VkExternalMemoryHandleTypeFlags external_handles)276 vn_device_memory_alloc(struct vn_device *dev,
277                        struct vn_device_memory *mem,
278                        const VkMemoryAllocateInfo *alloc_info,
279                        bool need_bo,
280                        VkMemoryPropertyFlags flags,
281                        VkExternalMemoryHandleTypeFlags external_handles)
282 {
283    VkDevice dev_handle = vn_device_to_handle(dev);
284    VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
285    VkResult result = vn_call_vkAllocateMemory(dev->instance, dev_handle,
286                                               alloc_info, NULL, &mem_handle);
287    if (result != VK_SUCCESS || !need_bo)
288       return result;
289 
290    result = vn_renderer_bo_create_from_device_memory(
291       dev->renderer, mem->size, mem->base.id, flags, external_handles,
292       &mem->base_bo);
293    if (result != VK_SUCCESS) {
294       vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
295       return result;
296    }
297 
298    vn_instance_roundtrip(dev->instance);
299 
300    return VK_SUCCESS;
301 }
302 
/* vkAllocateMemory: dispatch to one of five allocation paths based on the
 * pNext chain — AHB import, AHB export, dma-buf import, exportable
 * allocation, pool suballocation — falling back to a plain renderer
 * allocation (with a bo only for host-visible types).
 */
VkResult
vn_AllocateMemory(VkDevice device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMemory)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   const VkMemoryType *mem_type =
      &mem_props->memoryTypes[pAllocateInfo->memoryTypeIndex];

   /* scan the pNext chain once to classify the allocation */
   const VkExportMemoryAllocateInfo *export_info = NULL;
   const VkImportAndroidHardwareBufferInfoANDROID *import_ahb_info = NULL;
   const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
   bool export_ahb = false;

   vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
      switch (pnext->sType) {
      case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         export_info = (void *)pnext;
         if (export_info->handleTypes &
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
            export_ahb = true;
         else if (!export_info->handleTypes)
            /* an empty handleTypes means no export; treat as absent */
            export_info = NULL;
         break;
      case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         import_ahb_info = (void *)pnext;
         break;
      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         import_fd_info = (void *)pnext;
         break;
      default:
         break;
      }
   }

   struct vn_device_memory *mem =
      vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
   mem->size = pAllocateInfo->allocationSize;

   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   VkResult result;
   if (import_ahb_info) {
      result = vn_android_device_import_ahb(dev, mem, pAllocateInfo, alloc,
                                            import_ahb_info->buffer);
   } else if (export_ahb) {
      result = vn_android_device_allocate_ahb(dev, mem, pAllocateInfo, alloc);
   } else if (import_fd_info) {
      result = vn_device_memory_import_dma_buf(dev, mem, pAllocateInfo, false,
                                               import_fd_info->fd);
   } else if (export_info) {
      /* exportable memory always needs a bo to export from */
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo, true,
                                      mem_type->propertyFlags,
                                      export_info->handleTypes);
   } else if (vn_device_memory_should_suballocate(pAllocateInfo, mem_type)) {
      result = vn_device_memory_pool_alloc(
         dev, pAllocateInfo->memoryTypeIndex, mem->size, &mem->base_memory,
         &mem->base_bo, &mem->base_offset);
   } else {
      /* a bo is only needed when the memory can be mapped */
      const bool need_bo =
         mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo, need_bo,
                                      mem_type->propertyFlags, 0);
   }
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&mem->base);
      vk_free(alloc, mem);
      return vn_error(dev->instance, result);
   }

   *pMemory = mem_handle;

   return VK_SUCCESS;
}
387 
388 void
vn_FreeMemory(VkDevice device,VkDeviceMemory memory,const VkAllocationCallbacks * pAllocator)389 vn_FreeMemory(VkDevice device,
390               VkDeviceMemory memory,
391               const VkAllocationCallbacks *pAllocator)
392 {
393    struct vn_device *dev = vn_device_from_handle(device);
394    struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
395    const VkAllocationCallbacks *alloc =
396       pAllocator ? pAllocator : &dev->base.base.alloc;
397 
398    if (!mem)
399       return;
400 
401    if (mem->base_memory) {
402       vn_device_memory_pool_free(dev, mem->base_memory, mem->base_bo);
403    } else {
404       if (mem->base_bo)
405          vn_renderer_bo_unref(dev->renderer, mem->base_bo);
406       vn_async_vkFreeMemory(dev->instance, device, memory, NULL);
407    }
408 
409    if (mem->ahb)
410       vn_android_release_ahb(mem->ahb);
411 
412    vn_object_base_fini(&mem->base);
413    vk_free(alloc, mem);
414 }
415 
416 uint64_t
vn_GetDeviceMemoryOpaqueCaptureAddress(VkDevice device,const VkDeviceMemoryOpaqueCaptureAddressInfo * pInfo)417 vn_GetDeviceMemoryOpaqueCaptureAddress(
418    VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
419 {
420    struct vn_device *dev = vn_device_from_handle(device);
421    ASSERTED struct vn_device_memory *mem =
422       vn_device_memory_from_handle(pInfo->memory);
423 
424    assert(!mem->base_memory);
425    return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->instance, device,
426                                                         pInfo);
427 }
428 
429 VkResult
vn_MapMemory(VkDevice device,VkDeviceMemory memory,VkDeviceSize offset,VkDeviceSize size,VkMemoryMapFlags flags,void ** ppData)430 vn_MapMemory(VkDevice device,
431              VkDeviceMemory memory,
432              VkDeviceSize offset,
433              VkDeviceSize size,
434              VkMemoryMapFlags flags,
435              void **ppData)
436 {
437    struct vn_device *dev = vn_device_from_handle(device);
438    struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
439 
440    void *ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
441    if (!ptr)
442       return vn_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
443 
444    mem->map_end = size == VK_WHOLE_SIZE ? mem->size : offset + size;
445 
446    *ppData = ptr + mem->base_offset + offset;
447 
448    return VK_SUCCESS;
449 }
450 
void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
   /* Intentionally a no-op: vn_MapMemory maps the renderer bo and nothing
    * here tears that mapping down; the bo (and with it any mapping) is
    * released when the memory is freed in vn_FreeMemory.
    */
}
455 
456 VkResult
vn_FlushMappedMemoryRanges(VkDevice device,uint32_t memoryRangeCount,const VkMappedMemoryRange * pMemoryRanges)457 vn_FlushMappedMemoryRanges(VkDevice device,
458                            uint32_t memoryRangeCount,
459                            const VkMappedMemoryRange *pMemoryRanges)
460 {
461    struct vn_device *dev = vn_device_from_handle(device);
462 
463    for (uint32_t i = 0; i < memoryRangeCount; i++) {
464       const VkMappedMemoryRange *range = &pMemoryRanges[i];
465       struct vn_device_memory *mem =
466          vn_device_memory_from_handle(range->memory);
467 
468       const VkDeviceSize size = range->size == VK_WHOLE_SIZE
469                                    ? mem->map_end - range->offset
470                                    : range->size;
471       vn_renderer_bo_flush(dev->renderer, mem->base_bo,
472                            mem->base_offset + range->offset, size);
473    }
474 
475    return VK_SUCCESS;
476 }
477 
478 VkResult
vn_InvalidateMappedMemoryRanges(VkDevice device,uint32_t memoryRangeCount,const VkMappedMemoryRange * pMemoryRanges)479 vn_InvalidateMappedMemoryRanges(VkDevice device,
480                                 uint32_t memoryRangeCount,
481                                 const VkMappedMemoryRange *pMemoryRanges)
482 {
483    struct vn_device *dev = vn_device_from_handle(device);
484 
485    for (uint32_t i = 0; i < memoryRangeCount; i++) {
486       const VkMappedMemoryRange *range = &pMemoryRanges[i];
487       struct vn_device_memory *mem =
488          vn_device_memory_from_handle(range->memory);
489 
490       const VkDeviceSize size = range->size == VK_WHOLE_SIZE
491                                    ? mem->map_end - range->offset
492                                    : range->size;
493       vn_renderer_bo_invalidate(dev->renderer, mem->base_bo,
494                                 mem->base_offset + range->offset, size);
495    }
496 
497    return VK_SUCCESS;
498 }
499 
500 void
vn_GetDeviceMemoryCommitment(VkDevice device,VkDeviceMemory memory,VkDeviceSize * pCommittedMemoryInBytes)501 vn_GetDeviceMemoryCommitment(VkDevice device,
502                              VkDeviceMemory memory,
503                              VkDeviceSize *pCommittedMemoryInBytes)
504 {
505    struct vn_device *dev = vn_device_from_handle(device);
506    ASSERTED struct vn_device_memory *mem =
507       vn_device_memory_from_handle(memory);
508 
509    assert(!mem->base_memory);
510    vn_call_vkGetDeviceMemoryCommitment(dev->instance, device, memory,
511                                        pCommittedMemoryInBytes);
512 }
513 
514 VkResult
vn_GetMemoryFdKHR(VkDevice device,const VkMemoryGetFdInfoKHR * pGetFdInfo,int * pFd)515 vn_GetMemoryFdKHR(VkDevice device,
516                   const VkMemoryGetFdInfoKHR *pGetFdInfo,
517                   int *pFd)
518 {
519    struct vn_device *dev = vn_device_from_handle(device);
520    struct vn_device_memory *mem =
521       vn_device_memory_from_handle(pGetFdInfo->memory);
522 
523    /* At the moment, we support only the below handle types. */
524    assert(pGetFdInfo->handleType &
525           (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
526            VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
527    assert(!mem->base_memory && mem->base_bo);
528    *pFd = vn_renderer_bo_export_dma_buf(dev->renderer, mem->base_bo);
529    if (*pFd < 0)
530       return vn_error(dev->instance, VK_ERROR_TOO_MANY_OBJECTS);
531 
532    return VK_SUCCESS;
533 }
534 
/* Query the renderer for the properties of a dma-buf: its required
 * allocation size in \p out_alloc_size and its supported memory types in
 * \p out_mem_type_bits.  A temporary bo is created from \p fd only to
 * obtain a res_id for the query and is released before returning.  The fd
 * itself is not consumed.
 */
VkResult
vn_get_memory_dma_buf_properties(struct vn_device *dev,
                                 int fd,
                                 uint64_t *out_alloc_size,
                                 uint32_t *out_mem_type_bits)
{
   VkDevice device = vn_device_to_handle(dev);
   struct vn_renderer_bo *bo = NULL;
   VkResult result = VK_SUCCESS;

   result = vn_renderer_bo_create_from_dma_buf(dev->renderer, 0 /* size */,
                                               fd, 0 /* flags */, &bo);
   if (result != VK_SUCCESS)
      return result;

   vn_instance_roundtrip(dev->instance);

   VkMemoryResourceAllocationSizeProperties100000MESA alloc_size_props = {
      .sType =
         VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA,
      .pNext = NULL,
      .allocationSize = 0,
   };
   /* the allocation-size query is chained only when the renderer advertises
    * the experimental memoryResourceAllocationSize support; otherwise
    * *out_alloc_size is reported as 0
    */
   VkMemoryResourcePropertiesMESA props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
      .pNext =
         dev->instance->experimental.memoryResourceAllocationSize == VK_TRUE
            ? &alloc_size_props
            : NULL,
      .memoryTypeBits = 0,
   };
   result = vn_call_vkGetMemoryResourcePropertiesMESA(dev->instance, device,
                                                      bo->res_id, &props);
   vn_renderer_bo_unref(dev->renderer, bo);
   if (result != VK_SUCCESS)
      return result;

   *out_alloc_size = alloc_size_props.allocationSize;
   *out_mem_type_bits = props.memoryTypeBits;

   return VK_SUCCESS;
}
577 
578 VkResult
vn_GetMemoryFdPropertiesKHR(VkDevice device,VkExternalMemoryHandleTypeFlagBits handleType,int fd,VkMemoryFdPropertiesKHR * pMemoryFdProperties)579 vn_GetMemoryFdPropertiesKHR(VkDevice device,
580                             VkExternalMemoryHandleTypeFlagBits handleType,
581                             int fd,
582                             VkMemoryFdPropertiesKHR *pMemoryFdProperties)
583 {
584    struct vn_device *dev = vn_device_from_handle(device);
585    uint64_t alloc_size = 0;
586    uint32_t mem_type_bits = 0;
587    VkResult result = VK_SUCCESS;
588 
589    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
590       return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
591 
592    result =
593       vn_get_memory_dma_buf_properties(dev, fd, &alloc_size, &mem_type_bits);
594    if (result != VK_SUCCESS)
595       return vn_error(dev->instance, result);
596 
597    pMemoryFdProperties->memoryTypeBits = mem_type_bits;
598 
599    return VK_SUCCESS;
600 }
601