/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_buffer.h"

#include "venus-protocol/vn_protocol_driver_buffer.h"
#include "venus-protocol/vn_protocol_driver_buffer_view.h"

#include "vn_android.h"
#include "vn_device.h"
#include "vn_device_memory.h"

/* buffer commands */

/* mandatory buffer create infos to cache */
static const VkBufferCreateInfo cache_infos[] = {
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage =
         VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage =
         VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
};

static inline bool
vn_buffer_create_info_can_be_cached(const VkBufferCreateInfo *create_info)
{
   /* cache only VK_SHARING_MODE_EXCLUSIVE and without pNext for simplicity */
   return (create_info->pNext == NULL) &&
          (create_info->sharingMode == VK_SHARING_MODE_EXCLUSIVE);
}

static VkResult
vn_buffer_cache_entries_create(struct vn_device *dev,
                               struct vn_buffer_cache_entry **out_entries,
                               uint32_t *out_entry_count)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   VkDevice dev_handle = vn_device_to_handle(dev);
   struct vn_buffer_cache_entry *entries;
   const uint32_t entry_count = ARRAY_SIZE(cache_infos);
   VkResult result;

   entries = vk_zalloc(alloc, sizeof(*entries) * entry_count,
                       VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!entries)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < entry_count; i++) {
      VkBuffer buf_handle = VK_NULL_HANDLE;
      struct vn_buffer *buf = NULL;

      assert(vn_buffer_create_info_can_be_cached(&cache_infos[i]));

      result =
         vn_CreateBuffer(dev_handle, &cache_infos[i], alloc, &buf_handle);
      if (result != VK_SUCCESS) {
         vk_free(alloc, entries);
         return result;
      }

      buf = vn_buffer_from_handle(buf_handle);

      /* TODO remove below after VK_KHR_maintenance4 is available
       *
       * If the alignment is smaller than the size even for these size-1
       * probe buffers, the "req.size <= align64(info.size, req.alignment)"
       * assumption the cache relies on does not hold. Destroy the probe
       * buffer, free the entries, and report an empty cache.
       */
      if (buf->requirements.memory.memoryRequirements.alignment <
          buf->requirements.memory.memoryRequirements.size) {
         vn_DestroyBuffer(dev_handle, buf_handle, alloc);
         vk_free(alloc, entries);
         *out_entries = NULL;
         *out_entry_count = 0;
         return VK_SUCCESS;
      }

      entries[i].create_info = &cache_infos[i];
      entries[i].requirements.memory = buf->requirements.memory;
      entries[i].requirements.dedicated = buf->requirements.dedicated;

      vn_DestroyBuffer(dev_handle, buf_handle, alloc);
   }

   *out_entries = entries;
   *out_entry_count = entry_count;
   return VK_SUCCESS;
}

static void
vn_buffer_cache_entries_destroy(struct vn_device *dev,
                                struct vn_buffer_cache_entry *entries)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (entries)
      vk_free(alloc, entries);
}

static VkResult
vn_buffer_get_max_buffer_size(struct vn_device *dev,
                              uint64_t *out_max_buffer_size)
{
   /* TODO use VK_KHR_maintenance4 when available */
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle;
   VkBufferCreateInfo create_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   uint64_t max_buffer_size = 0;
   uint8_t begin = 0;
   uint8_t end = 64;

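   /*
    * Binary-search the largest power-of-two size for which vkCreateBuffer
    * succeeds, assuming creatability is monotonic in size: if a buffer of
    * size 2^mid can be created, all smaller sizes can be too. Each probe is
    * a synchronous create/destroy round trip, but at most log2(64) = 6
    * iterations are needed.
    */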
   while (begin < end) {
      uint8_t mid = (begin + end) >> 1;
      create_info.size = 1ull << mid;
      if (vn_CreateBuffer(dev_handle, &create_info, alloc, &buf_handle) ==
          VK_SUCCESS) {
         vn_DestroyBuffer(dev_handle, buf_handle, alloc);
         max_buffer_size = create_info.size;
         begin = mid + 1;
      } else {
         end = mid;
      }
   }

   *out_max_buffer_size = max_buffer_size;
   return VK_SUCCESS;
}

VkResult
vn_buffer_cache_init(struct vn_device *dev)
{
   uint32_t ahb_mem_type_bits = 0;
   uint64_t max_buffer_size = 0;
   struct vn_buffer_cache_entry *entries = NULL;
   uint32_t entry_count = 0;
   VkResult result;

   if (dev->base.base.enabled_extensions
          .ANDROID_external_memory_android_hardware_buffer) {
      result =
         vn_android_get_ahb_buffer_memory_type_bits(dev, &ahb_mem_type_bits);
      if (result != VK_SUCCESS)
         return result;
   }

   result = vn_buffer_get_max_buffer_size(dev, &max_buffer_size);
   if (result != VK_SUCCESS)
      return result;

   result = vn_buffer_cache_entries_create(dev, &entries, &entry_count);
   if (result != VK_SUCCESS)
      return result;

   dev->buffer_cache.ahb_mem_type_bits = ahb_mem_type_bits;
   dev->buffer_cache.max_buffer_size = max_buffer_size;
   dev->buffer_cache.entries = entries;
   dev->buffer_cache.entry_count = entry_count;
   return VK_SUCCESS;
}

void
vn_buffer_cache_fini(struct vn_device *dev)
{
   vn_buffer_cache_entries_destroy(dev, dev->buffer_cache.entries);
}

static bool
vn_buffer_cache_get_memory_requirements(
   struct vn_buffer_cache *cache,
   const VkBufferCreateInfo *create_info,
   struct vn_buffer_memory_requirements *out)
{
   if (create_info->size > cache->max_buffer_size)
      return false;

   if (!vn_buffer_create_info_can_be_cached(create_info))
      return false;

   /* 12.7. Resource Memory Association
    *
    * The memoryTypeBits member is identical for all VkBuffer objects created
    * with the same value for the flags and usage members in the
    * VkBufferCreateInfo structure and the handleTypes member of the
    * VkExternalMemoryBufferCreateInfo structure passed to vkCreateBuffer.
    * Further, if usage1 and usage2 of type VkBufferUsageFlags are such that
    * the bits set in usage2 are a subset of the bits set in usage1, and they
    * have the same flags and VkExternalMemoryBufferCreateInfo::handleTypes,
    * then the bits set in memoryTypeBits returned for usage1 must be a subset
    * of the bits set in memoryTypeBits returned for usage2, for all values of
    * flags.
    */
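   /*
    * A cached entry therefore matches when its flags are identical and its
    * usage is a superset of the requested usage; the memoryTypeBits it
    * reports are then a subset of the exact ones, which is a safe,
    * conservative answer. E.g. a request for TRANSFER_DST-only usage is
    * served by the TRANSFER_DST | INDEX_BUFFER entry above.
    */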
   for (uint32_t i = 0; i < cache->entry_count; i++) {
      const struct vn_buffer_cache_entry *entry = &cache->entries[i];
      if ((entry->create_info->flags == create_info->flags) &&
          ((entry->create_info->usage & create_info->usage) ==
           create_info->usage)) {
         *out = entry->requirements;

         /* TODO remove the comment after VK_KHR_maintenance4 is available
          *
          * This is based on the following implementation-defined behavior:
          *
          *    req.size <= align64(info.size, req.alignment)
          */
         out->memory.memoryRequirements.size = align64(
            create_info->size, out->memory.memoryRequirements.alignment);
         return true;
      }
   }

   return false;
}

static VkResult
vn_buffer_init(struct vn_device *dev,
               const VkBufferCreateInfo *create_info,
               struct vn_buffer *buf)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle = vn_buffer_to_handle(buf);
   VkResult result;

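   /*
    * On a cache hit the memory requirements are already known, so the
    * creation can be fired off asynchronously without waiting for the
    * renderer. On a miss, fall back to a synchronous create followed by a
    * vkGetBufferMemoryRequirements2 round trip to fill buf->requirements.
    */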
   if (vn_buffer_cache_get_memory_requirements(
          &dev->buffer_cache, create_info, &buf->requirements)) {
      vn_async_vkCreateBuffer(dev->instance, dev_handle, create_info, NULL,
                              &buf_handle);
      return VK_SUCCESS;
   }

   result = vn_call_vkCreateBuffer(dev->instance, dev_handle, create_info,
                                   NULL, &buf_handle);
   if (result != VK_SUCCESS)
      return result;

   buf->requirements.memory.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
   buf->requirements.memory.pNext = &buf->requirements.dedicated;
   buf->requirements.dedicated.sType =
      VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
   buf->requirements.dedicated.pNext = NULL;

   vn_call_vkGetBufferMemoryRequirements2(
      dev->instance, dev_handle,
      &(VkBufferMemoryRequirementsInfo2){
         .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
         .buffer = buf_handle,
      },
      &buf->requirements.memory);

   return VK_SUCCESS;
}

VkResult
vn_buffer_create(struct vn_device *dev,
                 const VkBufferCreateInfo *create_info,
                 const VkAllocationCallbacks *alloc,
                 struct vn_buffer **out_buf)
{
   struct vn_buffer *buf = NULL;
   VkResult result;

   buf = vk_zalloc(alloc, sizeof(*buf), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!buf)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_base_init(&buf->base, VK_OBJECT_TYPE_BUFFER, &dev->base);

   result = vn_buffer_init(dev, create_info, buf);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&buf->base);
      vk_free(alloc, buf);
      return result;
   }

   *out_buf = buf;

   return VK_SUCCESS;
}

VkResult
vn_CreateBuffer(VkDevice device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   struct vn_buffer *buf = NULL;
   VkResult result;

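   /*
    * Buffers to be backed by an AHardwareBuffer take the Android-specific
    * create path so that AHB external memory handling applies.
    */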
   const VkExternalMemoryBufferCreateInfo *external_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
   const bool ahb_info =
      external_info &&
      external_info->handleTypes ==
         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;

   if (ahb_info)
      result = vn_android_buffer_from_ahb(dev, pCreateInfo, alloc, &buf);
   else
      result = vn_buffer_create(dev, pCreateInfo, alloc, &buf);

   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pBuffer = vn_buffer_to_handle(buf);

   return VK_SUCCESS;
}

void
vn_DestroyBuffer(VkDevice device,
                 VkBuffer buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer *buf = vn_buffer_from_handle(buffer);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!buf)
      return;

   vn_async_vkDestroyBuffer(dev->instance, device, buffer, NULL);

   vn_object_base_fini(&buf->base);
   vk_free(alloc, buf);
}

VkDeviceAddress
vn_GetBufferDeviceAddress(VkDevice device,
                          const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferDeviceAddress(dev->instance, device, pInfo);
}

uint64_t
vn_GetBufferOpaqueCaptureAddress(VkDevice device,
                                 const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferOpaqueCaptureAddress(dev->instance, device,
                                                  pInfo);
}

void
vn_GetBufferMemoryRequirements2(VkDevice device,
                                const VkBufferMemoryRequirementsInfo2 *pInfo,
                                VkMemoryRequirements2 *pMemoryRequirements)
{
   const struct vn_buffer *buf = vn_buffer_from_handle(pInfo->buffer);
   union {
      VkBaseOutStructure *pnext;
      VkMemoryRequirements2 *two;
      VkMemoryDedicatedRequirements *dedicated;
   } u = { .two = pMemoryRequirements };

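   /*
    * Walk the caller's pNext chain and fill each recognized struct from the
    * requirements gathered at buffer creation, avoiding a renderer round
    * trip here.
    */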
   while (u.pnext) {
      switch (u.pnext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2:
         u.two->memoryRequirements =
            buf->requirements.memory.memoryRequirements;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         u.dedicated->prefersDedicatedAllocation =
            buf->requirements.dedicated.prefersDedicatedAllocation;
         u.dedicated->requiresDedicatedAllocation =
            buf->requirements.dedicated.requiresDedicatedAllocation;
         break;
      default:
         break;
      }
      u.pnext = u.pnext->pNext;
   }
}

VkResult
vn_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

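   /*
    * Memories suballocated from a base memory cannot be passed to the
    * renderer as-is. Lazily clone the bind infos and rewrite such entries to
    * point at the base memory handle with an adjusted offset.
    */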
   VkBindBufferMemoryInfo *local_infos = NULL;
   for (uint32_t i = 0; i < bindInfoCount; i++) {
      const VkBindBufferMemoryInfo *info = &pBindInfos[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(info->memory);
      if (!mem->base_memory)
         continue;

      if (!local_infos) {
         const size_t size = sizeof(*local_infos) * bindInfoCount;
         local_infos = vk_alloc(alloc, size, VN_DEFAULT_ALIGN,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!local_infos)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         memcpy(local_infos, pBindInfos, size);
      }

      local_infos[i].memory = vn_device_memory_to_handle(mem->base_memory);
      local_infos[i].memoryOffset += mem->base_offset;
   }
   if (local_infos)
      pBindInfos = local_infos;

   vn_async_vkBindBufferMemory2(dev->instance, device, bindInfoCount,
                                pBindInfos);

   vk_free(alloc, local_infos);

   return VK_SUCCESS;
}

/* buffer view commands */

VkResult
vn_CreateBufferView(VkDevice device,
                    const VkBufferViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkBufferView *pView)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_buffer_view *view =
      vk_zalloc(alloc, sizeof(*view), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&view->base, VK_OBJECT_TYPE_BUFFER_VIEW, &dev->base);

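   /*
    * The view handle is allocated driver-side, so the creation needs no
    * reply from the renderer and can be asynchronous.
    */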
   VkBufferView view_handle = vn_buffer_view_to_handle(view);
   vn_async_vkCreateBufferView(dev->instance, device, pCreateInfo, NULL,
                               &view_handle);

   *pView = view_handle;

   return VK_SUCCESS;
}

void
vn_DestroyBufferView(VkDevice device,
                     VkBufferView bufferView,
                     const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer_view *view = vn_buffer_view_from_handle(bufferView);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!view)
      return;

   vn_async_vkDestroyBufferView(dev->instance, device, bufferView, NULL);

   vn_object_base_fini(&view->base);
   vk_free(alloc, view);
}