/*
 * Copyright © 2018 Google, Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "vk_util.h"

#include "drm-uapi/msm_drm.h"

#include "tu_private.h"

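/* Query a single device parameter via DRM_MSM_GET_PARAM. The kernel returns
 * a 64-bit value; callers narrow it as needed.
 */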
static int
tu_drm_get_param(const struct tu_physical_device *dev,
                 uint32_t param,
                 uint64_t *value)
{
   /* Technically this requires a pipe, but the kernel only supports one pipe
    * anyway at the time of writing and most of these are clearly pipe
    * independent. */
   struct drm_msm_param req = {
      .pipe = MSM_PIPE_3D0,
      .param = param,
   };

   int ret = drmCommandWriteRead(dev->local_fd, DRM_MSM_GET_PARAM, &req,
                                 sizeof(req));
   if (ret)
      return ret;

   *value = req.value;

   return 0;
}

static int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id)
{
   uint64_t value;
   int ret = tu_drm_get_param(dev, MSM_PARAM_GPU_ID, &value);
   if (ret)
      return ret;

   *id = value;
   return 0;
}

static int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size)
{
   uint64_t value;
   int ret = tu_drm_get_param(dev, MSM_PARAM_GMEM_SIZE, &value);
   if (ret)
      return ret;

   *size = value;
   return 0;
}

static int
tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base)
{
   return tu_drm_get_param(dev, MSM_PARAM_GMEM_BASE, base);
}

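/* Create a kernel submit queue with the given priority and return its id,
 * for later use in DRM_MSM_GEM_SUBMIT.
 */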
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct drm_msm_submitqueue req = {
      .flags = 0,
      .prio = priority,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
   if (ret)
      return ret;

   *queue_id = req.id;
   return 0;
}

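/* Destroy a submit queue. This cannot meaningfully fail, so any ioctl error
 * is ignored.
 */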
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   drmCommandWrite(dev->physical_device->local_fd, DRM_MSM_SUBMITQUEUE_CLOSE,
                   &queue_id, sizeof(uint32_t));
}

static void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle)
{
   struct drm_gem_close req = {
      .handle = gem_handle,
   };

   drmIoctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
}

/** Helper for DRM_MSM_GEM_INFO, returns 0 on error. */
static uint64_t
tu_gem_info(const struct tu_device *dev, uint32_t gem_handle, uint32_t info)
{
   struct drm_msm_gem_info req = {
      .handle = gem_handle,
      .info = info,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_GEM_INFO, &req, sizeof(req));
   if (ret < 0)
      return 0;

   return req.value;
}

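/* Finish initializing a tu_bo from a GEM handle by looking up its GPU
 * virtual address. Takes ownership of the handle: on failure it is closed
 * so the caller does not leak it.
 */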
static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
   if (!iova) {
      tu_gem_close(dev, gem_handle);
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   }

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   struct drm_msm_gem_new req = {
      .size = size,
      .flags = MSM_BO_WC,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_GEM_NEW, &req, sizeof(req));
   if (ret)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   return tu_bo_init(dev, bo, req.handle, size);
}

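/* Import a dma-buf fd as a tu_bo. The fd itself is only borrowed; PRIME
 * import produces a GEM handle that the resulting tu_bo owns.
 */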
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int prime_fd)
{
   /* lseek() to get the real size */
   off_t real_size = lseek(prime_fd, 0, SEEK_END);
   lseek(prime_fd, 0, SEEK_SET);
   if (real_size < 0 || (uint64_t) real_size < size)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   uint32_t gem_handle;
   int ret = drmPrimeFDToHandle(dev->physical_device->local_fd, prime_fd,
                                &gem_handle);
   if (ret)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   return tu_bo_init(dev, bo, gem_handle, size);
}

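/* Export a BO as a dma-buf. Returns the new fd on success, or -1 on
 * failure.
 */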
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   int prime_fd;
   int ret = drmPrimeHandleToFD(dev->physical_device->local_fd, bo->gem_handle,
                                DRM_CLOEXEC, &prime_fd);

   return ret == 0 ? prime_fd : -1;
}

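/* Map a BO into CPU address space, lazily: an existing mapping is reused.
 * The mmap offset for the GEM object comes from MSM_INFO_GET_OFFSET.
 */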
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info(dev, bo->gem_handle, MSM_INFO_GET_OFFSET);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

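/* Open a render node, verify that it is driven by a compatible msm kernel
 * driver, and populate the physical device with the GPU ID and GMEM layout.
 */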
static VkResult
tu_drm_device_init(struct tu_physical_device *device,
                   struct tu_instance *instance,
                   drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   device->msm_major_version = version->version_major;
   device->msm_minor_version = version->version_minor;

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base");
      goto fail;
   }

   return tu_physical_device_init(device, instance);

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

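/* Walk the DRM platform devices and initialize a physical device for each
 * compatible msm render node found.
 */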
VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP) {
      if (max_devices < 0)
         tu_logi("drmGetDevices2 returned error: %s\n", strerror(-max_devices));
      else
         tu_logi("Found %d drm nodes", max_devices);
   }

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_drm_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

/* Queue semaphore functions */

static void
tu_semaphore_part_destroy(struct tu_device *device,
                          struct tu_semaphore_part *part)
{
   switch (part->kind) {
   case TU_SEMAPHORE_NONE:
      break;
   case TU_SEMAPHORE_SYNCOBJ:
      drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
      break;
   }
   part->kind = TU_SEMAPHORE_NONE;
}

static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem)
{
   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      tu_semaphore_part_destroy(device, &sem->temporary);
   }
}

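/* Gather the syncobj-backed parts of a semaphore array into a
 * drm_msm_gem_submit_syncobj array: one pass counts, a second pass fills.
 * When `wait` is set, each entry gets MSM_SUBMIT_SYNCOBJ_RESET so the
 * kernel resets the syncobj after waiting, matching binary semaphore
 * semantics.
 */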
static VkResult
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
                          uint32_t sem_count,
                          bool wait,
                          struct drm_msm_gem_submit_syncobj **out,
                          uint32_t *out_count)
{
   uint32_t syncobj_count = 0;
   struct drm_msm_gem_submit_syncobj *syncobjs;

   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ)
         ++syncobj_count;
   }

   *out = NULL;
   *out_count = syncobj_count;
   if (!syncobj_count)
      return VK_SUCCESS;

   *out = syncobjs = calloc(syncobj_count, sizeof(*syncobjs));
   if (!syncobjs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
         syncobjs[j].handle = part->syncobj;
         syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
         ++j;
      }
   }

   return VK_SUCCESS;
}

static void
tu_semaphores_remove_temp(struct tu_device *device,
                          const VkSemaphore *sems,
                          uint32_t sem_count)
{
   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
      tu_semaphore_remove_temp(device, sem);
   }
}

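/* Semaphores are only backed by a kernel syncobj when the app requests an
 * exportable handle type; purely internal semaphores need no kernel object.
 */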
VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
                      VK_OBJECT_TYPE_SEMAPHORE);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   sem->permanent.kind = TU_SEMAPHORE_NONE;
   sem->temporary.kind = TU_SEMAPHORE_NONE;

   if (handleTypes) {
      if (drmSyncobjCreate(device->physical_device->local_fd, 0,
                           &sem->permanent.syncobj) < 0) {
         vk_object_free(&device->vk, pAllocator, sem);
         return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
   }

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   tu_semaphore_part_destroy(device, &sem->permanent);
   tu_semaphore_part_destroy(device, &sem->temporary);

   vk_object_free(&device->vk, pAllocator, sem);
}

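/* Import an external semaphore payload. An opaque fd replaces the backing
 * syncobj outright; a sync fd imports a fence into an existing (or freshly
 * created) syncobj, with fd == -1 meaning "already signaled". With
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT the payload lands in the temporary
 * part, otherwise in the permanent one.
 */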
VkResult
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
   int ret;
   struct tu_semaphore_part *dst = NULL;

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      dst = &sem->temporary;
   } else {
      dst = &sem->permanent;
   }

   uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      uint32_t old_syncobj = syncobj;
      ret = drmSyncobjFDToHandle(device->physical_device->local_fd,
                                 pImportSemaphoreFdInfo->fd, &syncobj);
      if (ret == 0) {
         close(pImportSemaphoreFdInfo->fd);
         if (old_syncobj)
            drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
      }
      break;
   }
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
      if (!syncobj) {
         ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
         if (ret)
            break;
      }
      if (pImportSemaphoreFdInfo->fd == -1) {
         ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
      } else {
         ret = drmSyncobjImportSyncFile(device->physical_device->local_fd,
                                        syncobj, pImportSemaphoreFdInfo->fd);
      }
      if (!ret)
         close(pImportSemaphoreFdInfo->fd);
      break;
   }
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret) {
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }
   dst->syncobj = syncobj;
   dst->kind = TU_SEMAPHORE_SYNCOBJ;

   return VK_SUCCESS;
}

VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
   int ret;
   uint32_t syncobj_handle;

   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->temporary.syncobj;
   } else {
      assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->permanent.syncobj;
   }

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      ret = drmSyncobjHandleToFD(device->physical_device->local_fd,
                                 syncobj_handle, pFd);
      break;
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      ret = drmSyncobjExportSyncFile(device->physical_device->local_fd,
                                     syncobj_handle, pFd);
      if (!ret) {
         if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
            tu_semaphore_part_destroy(device, &sem->temporary);
         } else {
            drmSyncobjReset(device->physical_device->local_fd,
                            &syncobj_handle, 1);
         }
      }
      break;
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret)
      return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   return VK_SUCCESS;
}

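/* Syncobj support requires both the DRM_CAP_SYNCOBJ capability and msm
 * kernel driver version 1.6 or newer.
 */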
static bool
tu_has_syncobj(struct tu_physical_device *pdev)
{
   uint64_t value;
   if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);

   if (tu_has_syncobj(pdev) &&
       (pExternalSemaphoreInfo->handleType ==
           VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
        pExternalSemaphoreInfo->handleType ==
           VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT |
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT |
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
   } else {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
      pExternalSemaphoreProperties->compatibleHandleTypes = 0;
      pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
   }
}

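/* Submit each VkSubmitInfo as one DRM_MSM_GEM_SUBMIT: gather wait/signal
 * syncobjs, flatten every command buffer's IB entries and BOs into a single
 * submission, and request a fence fd on the last submit so the queue's
 * submit fence can track completion.
 */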
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   VkResult result;

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
      uint32_t nr_in_syncobjs, nr_out_syncobjs;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         false, &in_syncobjs, &nr_in_syncobjs);
      if (result != VK_SUCCESS) {
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         false, &out_syncobjs, &nr_out_syncobjs);
      if (result != VK_SUCCESS) {
         free(in_syncobjs);
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[k].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[k].offset;
            cmds[entry_idx].size = cs->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (nr_in_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_IN;
      }
      if (nr_out_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_OUT;
      }

      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
         .in_syncobjs = (uint64_t)(uintptr_t) in_syncobjs,
         .out_syncobjs = (uint64_t)(uintptr_t) out_syncobjs,
         .nr_in_syncobjs = nr_in_syncobjs,
         .nr_out_syncobjs = nr_out_syncobjs,
         .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         tu_bo_list_destroy(&bo_list);
         free(in_syncobjs);
         free(out_syncobjs);
         return tu_device_set_lost(queue->device, "submit failed: %s\n",
                                   strerror(errno));
      }

      tu_bo_list_destroy(&bo_list);
      free(in_syncobjs);
      free(out_syncobjs);

      tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
                                pSubmits[i].waitSemaphoreCount);
      if (last_submit) {
         /* no need to merge fences as queue execution is serialized; the
          * fence fd is only requested (and thus only valid) on the last
          * submit, so there is nothing to close for earlier submits.
          */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}