1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <sys/ioctl.h>
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #include <string.h>
28 #include <errno.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31 
32 #include "anv_private.h"
33 #include "common/gen_defines.h"
34 #include "common/gen_gem.h"
35 #include "drm-uapi/sync_file.h"
36 
37 /**
38  * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
39  *
40  * Return gem handle, or 0 on failure. Gem handles are never 0.
41  */
42 uint32_t
anv_gem_create(struct anv_device * device,uint64_t size)43 anv_gem_create(struct anv_device *device, uint64_t size)
44 {
45    struct drm_i915_gem_create gem_create = {
46       .size = size,
47    };
48 
49    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
50    if (ret != 0) {
51       /* FIXME: What do we do if this fails? */
52       return 0;
53    }
54 
55    return gem_create.handle;
56 }
57 
58 void
anv_gem_close(struct anv_device * device,uint32_t gem_handle)59 anv_gem_close(struct anv_device *device, uint32_t gem_handle)
60 {
61    struct drm_gem_close close = {
62       .handle = gem_handle,
63    };
64 
65    gen_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
66 }
67 
68 /**
69  * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
70  */
71 static void*
anv_gem_mmap_offset(struct anv_device * device,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)72 anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
73                     uint64_t offset, uint64_t size, uint32_t flags)
74 {
75    struct drm_i915_gem_mmap_offset gem_mmap = {
76       .handle = gem_handle,
77       .flags = (flags & I915_MMAP_WC) ?
78          I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
79    };
80    assert(offset == 0);
81 
82    /* Get the fake offset back */
83    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
84    if (ret != 0)
85       return MAP_FAILED;
86 
87    /* And map it */
88    void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
89                     device->fd, gem_mmap.offset);
90    return map;
91 }
92 
93 static void*
anv_gem_mmap_legacy(struct anv_device * device,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)94 anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
95                     uint64_t offset, uint64_t size, uint32_t flags)
96 {
97    struct drm_i915_gem_mmap gem_mmap = {
98       .handle = gem_handle,
99       .offset = offset,
100       .size = size,
101       .flags = flags,
102    };
103 
104    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
105    if (ret != 0)
106       return MAP_FAILED;
107 
108    return (void *)(uintptr_t) gem_mmap.addr_ptr;
109 }
110 
111 /**
112  * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
113  */
114 void*
anv_gem_mmap(struct anv_device * device,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)115 anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
116              uint64_t offset, uint64_t size, uint32_t flags)
117 {
118    void *map;
119    if (device->physical->has_mmap_offset)
120       map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
121    else
122       map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);
123 
124    if (map != MAP_FAILED)
125       VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));
126 
127    return map;
128 }
129 
/* Wrapper around munmap that also tells valgrind the mapping is no longer
 * a valid allocation. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   (void) munmap(p, size);
}
139 
140 uint32_t
anv_gem_userptr(struct anv_device * device,void * mem,size_t size)141 anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
142 {
143    struct drm_i915_gem_userptr userptr = {
144       .user_ptr = (__u64)((unsigned long) mem),
145       .user_size = size,
146       .flags = 0,
147    };
148 
149    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
150    if (ret == -1)
151       return 0;
152 
153    return userptr.handle;
154 }
155 
156 int
anv_gem_set_caching(struct anv_device * device,uint32_t gem_handle,uint32_t caching)157 anv_gem_set_caching(struct anv_device *device,
158                     uint32_t gem_handle, uint32_t caching)
159 {
160    struct drm_i915_gem_caching gem_caching = {
161       .handle = gem_handle,
162       .caching = caching,
163    };
164 
165    return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
166 }
167 
168 int
anv_gem_set_domain(struct anv_device * device,uint32_t gem_handle,uint32_t read_domains,uint32_t write_domain)169 anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
170                    uint32_t read_domains, uint32_t write_domain)
171 {
172    struct drm_i915_gem_set_domain gem_set_domain = {
173       .handle = gem_handle,
174       .read_domains = read_domains,
175       .write_domain = write_domain,
176    };
177 
178    return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
179 }
180 
181 /**
182  * Returns 0, 1, or negative to indicate error
183  */
184 int
anv_gem_busy(struct anv_device * device,uint32_t gem_handle)185 anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
186 {
187    struct drm_i915_gem_busy busy = {
188       .handle = gem_handle,
189    };
190 
191    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
192    if (ret < 0)
193       return ret;
194 
195    return busy.busy != 0;
196 }
197 
198 /**
199  * On error, \a timeout_ns holds the remaining time.
200  */
201 int
anv_gem_wait(struct anv_device * device,uint32_t gem_handle,int64_t * timeout_ns)202 anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
203 {
204    struct drm_i915_gem_wait wait = {
205       .bo_handle = gem_handle,
206       .timeout_ns = *timeout_ns,
207       .flags = 0,
208    };
209 
210    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
211    *timeout_ns = wait.timeout_ns;
212 
213    return ret;
214 }
215 
216 int
anv_gem_execbuffer(struct anv_device * device,struct drm_i915_gem_execbuffer2 * execbuf)217 anv_gem_execbuffer(struct anv_device *device,
218                    struct drm_i915_gem_execbuffer2 *execbuf)
219 {
220    if (execbuf->flags & I915_EXEC_FENCE_OUT)
221       return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
222    else
223       return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
224 }
225 
226 /** Return -1 on error. */
227 int
anv_gem_get_tiling(struct anv_device * device,uint32_t gem_handle)228 anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
229 {
230    struct drm_i915_gem_get_tiling get_tiling = {
231       .handle = gem_handle,
232    };
233 
234    /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
235     * anymore, so we will need another way to get the tiling. Apparently this
236     * is only used in Android code, so we may need some other way to
237     * communicate the tiling mode.
238     */
239    if (gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
240       assert(!"Failed to get BO tiling");
241       return -1;
242    }
243 
244    return get_tiling.tiling_mode;
245 }
246 
247 int
anv_gem_set_tiling(struct anv_device * device,uint32_t gem_handle,uint32_t stride,uint32_t tiling)248 anv_gem_set_tiling(struct anv_device *device,
249                    uint32_t gem_handle, uint32_t stride, uint32_t tiling)
250 {
251    int ret;
252 
253    /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
254     * nothing needs to be done.
255     */
256    if (!device->info.has_tiling_uapi)
257       return 0;
258 
259    /* set_tiling overwrites the input on the error path, so we have to open
260     * code gen_ioctl.
261     */
262    do {
263       struct drm_i915_gem_set_tiling set_tiling = {
264          .handle = gem_handle,
265          .tiling_mode = tiling,
266          .stride = stride,
267       };
268 
269       ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
270    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
271 
272    return ret;
273 }
274 
275 int
anv_gem_get_param(int fd,uint32_t param)276 anv_gem_get_param(int fd, uint32_t param)
277 {
278    int tmp;
279 
280    drm_i915_getparam_t gp = {
281       .param = param,
282       .value = &tmp,
283    };
284 
285    int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
286    if (ret == 0)
287       return tmp;
288 
289    return 0;
290 }
291 
292 bool
anv_gem_get_bit6_swizzle(int fd,uint32_t tiling)293 anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
294 {
295    struct drm_gem_close close;
296    int ret;
297 
298    struct drm_i915_gem_create gem_create = {
299       .size = 4096,
300    };
301 
302    if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
303       assert(!"Failed to create GEM BO");
304       return false;
305    }
306 
307    bool swizzled = false;
308 
309    /* set_tiling overwrites the input on the error path, so we have to open
310     * code gen_ioctl.
311     */
312    do {
313       struct drm_i915_gem_set_tiling set_tiling = {
314          .handle = gem_create.handle,
315          .tiling_mode = tiling,
316          .stride = tiling == I915_TILING_X ? 512 : 128,
317       };
318 
319       ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
320    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
321 
322    if (ret != 0) {
323       assert(!"Failed to set BO tiling");
324       goto close_and_return;
325    }
326 
327    struct drm_i915_gem_get_tiling get_tiling = {
328       .handle = gem_create.handle,
329    };
330 
331    if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
332       assert(!"Failed to get BO tiling");
333       goto close_and_return;
334    }
335 
336    swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
337 
338 close_and_return:
339 
340    memset(&close, 0, sizeof(close));
341    close.handle = gem_create.handle;
342    gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
343 
344    return swizzled;
345 }
346 
347 bool
anv_gem_has_context_priority(int fd)348 anv_gem_has_context_priority(int fd)
349 {
350    return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
351                                      GEN_CONTEXT_MEDIUM_PRIORITY);
352 }
353 
354 int
anv_gem_create_context(struct anv_device * device)355 anv_gem_create_context(struct anv_device *device)
356 {
357    struct drm_i915_gem_context_create create = { 0 };
358 
359    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
360    if (ret == -1)
361       return -1;
362 
363    return create.ctx_id;
364 }
365 
366 int
anv_gem_destroy_context(struct anv_device * device,int context)367 anv_gem_destroy_context(struct anv_device *device, int context)
368 {
369    struct drm_i915_gem_context_destroy destroy = {
370       .ctx_id = context,
371    };
372 
373    return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
374 }
375 
376 int
anv_gem_set_context_param(int fd,int context,uint32_t param,uint64_t value)377 anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
378 {
379    struct drm_i915_gem_context_param p = {
380       .ctx_id = context,
381       .param = param,
382       .value = value,
383    };
384    int err = 0;
385 
386    if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
387       err = -errno;
388    return err;
389 }
390 
391 int
anv_gem_get_context_param(int fd,int context,uint32_t param,uint64_t * value)392 anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
393 {
394    struct drm_i915_gem_context_param gp = {
395       .ctx_id = context,
396       .param = param,
397    };
398 
399    int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
400    if (ret == -1)
401       return -1;
402 
403    *value = gp.value;
404    return 0;
405 }
406 
407 int
anv_gem_gpu_get_reset_stats(struct anv_device * device,uint32_t * active,uint32_t * pending)408 anv_gem_gpu_get_reset_stats(struct anv_device *device,
409                             uint32_t *active, uint32_t *pending)
410 {
411    struct drm_i915_reset_stats stats = {
412       .ctx_id = device->context_id,
413    };
414 
415    int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
416    if (ret == 0) {
417       *active = stats.batch_active;
418       *pending = stats.batch_pending;
419    }
420 
421    return ret;
422 }
423 
424 int
anv_gem_handle_to_fd(struct anv_device * device,uint32_t gem_handle)425 anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
426 {
427    struct drm_prime_handle args = {
428       .handle = gem_handle,
429       .flags = DRM_CLOEXEC,
430    };
431 
432    int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
433    if (ret == -1)
434       return -1;
435 
436    return args.fd;
437 }
438 
439 uint32_t
anv_gem_fd_to_handle(struct anv_device * device,int fd)440 anv_gem_fd_to_handle(struct anv_device *device, int fd)
441 {
442    struct drm_prime_handle args = {
443       .fd = fd,
444    };
445 
446    int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
447    if (ret == -1)
448       return 0;
449 
450    return args.handle;
451 }
452 
453 int
anv_gem_reg_read(int fd,uint32_t offset,uint64_t * result)454 anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
455 {
456    struct drm_i915_reg_read args = {
457       .offset = offset
458    };
459 
460    int ret = gen_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);
461 
462    *result = args.val;
463    return ret;
464 }
465 
466 int
anv_gem_sync_file_merge(struct anv_device * device,int fd1,int fd2)467 anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
468 {
469    struct sync_merge_data args = {
470       .name = "anv merge fence",
471       .fd2 = fd2,
472       .fence = -1,
473    };
474 
475    int ret = gen_ioctl(fd1, SYNC_IOC_MERGE, &args);
476    if (ret == -1)
477       return -1;
478 
479    return args.fence;
480 }
481 
482 uint32_t
anv_gem_syncobj_create(struct anv_device * device,uint32_t flags)483 anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
484 {
485    struct drm_syncobj_create args = {
486       .flags = flags,
487    };
488 
489    int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
490    if (ret)
491       return 0;
492 
493    return args.handle;
494 }
495 
496 void
anv_gem_syncobj_destroy(struct anv_device * device,uint32_t handle)497 anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
498 {
499    struct drm_syncobj_destroy args = {
500       .handle = handle,
501    };
502 
503    gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
504 }
505 
506 int
anv_gem_syncobj_handle_to_fd(struct anv_device * device,uint32_t handle)507 anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
508 {
509    struct drm_syncobj_handle args = {
510       .handle = handle,
511    };
512 
513    int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
514    if (ret)
515       return -1;
516 
517    return args.fd;
518 }
519 
520 uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device * device,int fd)521 anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
522 {
523    struct drm_syncobj_handle args = {
524       .fd = fd,
525    };
526 
527    int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
528    if (ret)
529       return 0;
530 
531    return args.handle;
532 }
533 
534 int
anv_gem_syncobj_export_sync_file(struct anv_device * device,uint32_t handle)535 anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
536 {
537    struct drm_syncobj_handle args = {
538       .handle = handle,
539       .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
540    };
541 
542    int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
543    if (ret)
544       return -1;
545 
546    return args.fd;
547 }
548 
549 int
anv_gem_syncobj_import_sync_file(struct anv_device * device,uint32_t handle,int fd)550 anv_gem_syncobj_import_sync_file(struct anv_device *device,
551                                  uint32_t handle, int fd)
552 {
553    struct drm_syncobj_handle args = {
554       .handle = handle,
555       .fd = fd,
556       .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
557    };
558 
559    return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
560 }
561 
562 void
anv_gem_syncobj_reset(struct anv_device * device,uint32_t handle)563 anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
564 {
565    struct drm_syncobj_array args = {
566       .handles = (uint64_t)(uintptr_t)&handle,
567       .count_handles = 1,
568    };
569 
570    gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
571 }
572 
/* Thin pass-through to the shared gen_gem helper that probes whether the
 * kernel supports DRM_IOCTL_SYNCOBJ_WAIT.
 */
bool
anv_gem_supports_syncobj_wait(int fd)
{
   return gen_gem_supports_syncobj_wait(fd);
}
578 
579 int
anv_gem_syncobj_wait(struct anv_device * device,uint32_t * handles,uint32_t num_handles,int64_t abs_timeout_ns,bool wait_all)580 anv_gem_syncobj_wait(struct anv_device *device,
581                      uint32_t *handles, uint32_t num_handles,
582                      int64_t abs_timeout_ns, bool wait_all)
583 {
584    struct drm_syncobj_wait args = {
585       .handles = (uint64_t)(uintptr_t)handles,
586       .count_handles = num_handles,
587       .timeout_nsec = abs_timeout_ns,
588       .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
589    };
590 
591    if (wait_all)
592       args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
593 
594    return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
595 }
596