1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <sys/ioctl.h>
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #include <string.h>
28 #include <errno.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31 
32 #include "anv_private.h"
33 #include "common/intel_defines.h"
34 #include "common/intel_gem.h"
35 
36 /**
37  * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
38  *
39  * Return gem handle, or 0 on failure. Gem handles are never 0.
40  */
41 uint32_t
anv_gem_create(struct anv_device * device,uint64_t size)42 anv_gem_create(struct anv_device *device, uint64_t size)
43 {
44    struct drm_i915_gem_create gem_create = {
45       .size = size,
46    };
47 
48    int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
49    if (ret != 0) {
50       /* FIXME: What do we do if this fails? */
51       return 0;
52    }
53 
54    return gem_create.handle;
55 }
56 
57 void
anv_gem_close(struct anv_device * device,uint32_t gem_handle)58 anv_gem_close(struct anv_device *device, uint32_t gem_handle)
59 {
60    struct drm_gem_close close = {
61       .handle = gem_handle,
62    };
63 
64    intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
65 }
66 
67 uint32_t
anv_gem_create_regions(struct anv_device * device,uint64_t anv_bo_size,uint32_t num_regions,struct drm_i915_gem_memory_class_instance * regions)68 anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
69                        uint32_t num_regions,
70                        struct drm_i915_gem_memory_class_instance *regions)
71 {
72    struct drm_i915_gem_create_ext_memory_regions ext_regions = {
73       .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
74       .num_regions = num_regions,
75       .regions = (uintptr_t)regions,
76    };
77 
78    struct drm_i915_gem_create_ext gem_create = {
79       .size = anv_bo_size,
80       .extensions = (uintptr_t) &ext_regions,
81    };
82 
83    int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
84                          &gem_create);
85    if (ret != 0) {
86       return 0;
87    }
88 
89    return gem_create.handle;
90 }
91 
/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET: obtain a fake mmap offset
 * for the BO, then mmap(2) it through the DRM fd. Returns MAP_FAILED on
 * error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      /* Platforms with local memory only accept the FIXED mmap mode;
       * otherwise translate the legacy I915_MMAP_WC flag into the
       * corresponding WC/WB offset mode.
       */
      .flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
         (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   /* This path does not support mapping at a non-zero offset. */
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}
116 
117 static void*
anv_gem_mmap_legacy(struct anv_device * device,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)118 anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
119                     uint64_t offset, uint64_t size, uint32_t flags)
120 {
121    assert(!device->info.has_local_mem);
122 
123    struct drm_i915_gem_mmap gem_mmap = {
124       .handle = gem_handle,
125       .offset = offset,
126       .size = size,
127       .flags = flags,
128    };
129 
130    int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
131    if (ret != 0)
132       return MAP_FAILED;
133 
134    return (void *)(uintptr_t) gem_mmap.addr_ptr;
135 }
136 
137 /**
138  * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
139  */
140 void*
anv_gem_mmap(struct anv_device * device,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)141 anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
142              uint64_t offset, uint64_t size, uint32_t flags)
143 {
144    void *map;
145    if (device->physical->has_mmap_offset)
146       map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
147    else
148       map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);
149 
150    if (map != MAP_FAILED)
151       VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));
152 
153    return map;
154 }
155 
/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid.  Pair this with anv_gem_mmap().
 *
 * NOTE(review): the munmap(2) return value is ignored here — presumably
 * intentional, since there is no meaningful recovery for a failed unmap.
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   /* Mirrors the MALLOCLIKE annotation made in anv_gem_mmap(). */
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
165 
/**
 * Wrapper around DRM_IOCTL_I915_GEM_USERPTR: wrap an existing CPU
 * allocation in a GEM object.
 *
 * Returns the gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   /* Ask the kernel to validate the pages up front when it supports it. */
   if (device->physical->has_userptr_probe)
      userptr.flags |= I915_USERPTR_PROBE;

   int ret;
retry:
   ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1) {
      /* Some kernels reject the default mode with ENODEV; retry exactly
       * once in unsynchronized mode.  Only taken when no other flags were
       * requested (flags is still 0).
       */
      if (errno == ENODEV && userptr.flags == 0) {
         userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
         goto retry;
      }
      /* NOTE(review): unsynchronized userptr apparently needs elevated
       * privileges, hence the run-as-root hint — confirm against kernel
       * docs.
       */
      if (geteuid() != 0) {
         fprintf(stderr, "%s", "ioctl(I915_GEM_USERPTR) failed. Try running as root but expect poor stability.\n");
      }
      return 0;
   }

   return userptr.handle;
}
194 
195 int
anv_gem_set_caching(struct anv_device * device,uint32_t gem_handle,uint32_t caching)196 anv_gem_set_caching(struct anv_device *device,
197                     uint32_t gem_handle, uint32_t caching)
198 {
199    struct drm_i915_gem_caching gem_caching = {
200       .handle = gem_handle,
201       .caching = caching,
202    };
203 
204    return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
205 }
206 
207 int
anv_gem_set_domain(struct anv_device * device,uint32_t gem_handle,uint32_t read_domains,uint32_t write_domain)208 anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
209                    uint32_t read_domains, uint32_t write_domain)
210 {
211    struct drm_i915_gem_set_domain gem_set_domain = {
212       .handle = gem_handle,
213       .read_domains = read_domains,
214       .write_domain = write_domain,
215    };
216 
217    return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
218 }
219 
220 /**
221  * Returns 0, 1, or negative to indicate error
222  */
223 int
anv_gem_busy(struct anv_device * device,uint32_t gem_handle)224 anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
225 {
226    struct drm_i915_gem_busy busy = {
227       .handle = gem_handle,
228    };
229 
230    int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
231    if (ret < 0)
232       return ret;
233 
234    return busy.busy != 0;
235 }
236 
/**
 * Wrapper around DRM_IOCTL_I915_GEM_WAIT.
 *
 * \a *timeout_ns is written back with the kernel-updated value on every
 * path, success or error; on error it holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   /* Propagate the kernel-updated timeout regardless of the result. */
   *timeout_ns = wait.timeout_ns;

   return ret;
}
254 
255 int
anv_gem_execbuffer(struct anv_device * device,struct drm_i915_gem_execbuffer2 * execbuf)256 anv_gem_execbuffer(struct anv_device *device,
257                    struct drm_i915_gem_execbuffer2 *execbuf)
258 {
259    if (execbuf->flags & I915_EXEC_FENCE_OUT)
260       return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
261    else
262       return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
263 }
264 
265 /** Return -1 on error. */
266 int
anv_gem_get_tiling(struct anv_device * device,uint32_t gem_handle)267 anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
268 {
269    struct drm_i915_gem_get_tiling get_tiling = {
270       .handle = gem_handle,
271    };
272 
273    /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
274     * anymore, so we will need another way to get the tiling. Apparently this
275     * is only used in Android code, so we may need some other way to
276     * communicate the tiling mode.
277     */
278    if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
279       assert(!"Failed to get BO tiling");
280       return -1;
281    }
282 
283    return get_tiling.tiling_mode;
284 }
285 
/* Set the tiling mode and stride of a BO via DRM_IOCTL_I915_GEM_SET_TILING.
 *
 * Returns the raw ioctl result: 0 on success, -1 with errno set on failure.
 * On platforms without the tiling uAPI this is a successful no-op.
 */
int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl.
    */
   do {
      /* Rebuilt on every iteration because a failed ioctl may have
       * clobbered the previous contents.
       */
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
313 
314 int
anv_gem_get_param(int fd,uint32_t param)315 anv_gem_get_param(int fd, uint32_t param)
316 {
317    int tmp;
318 
319    drm_i915_getparam_t gp = {
320       .param = param,
321       .value = &tmp,
322    };
323 
324    int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
325    if (ret == 0)
326       return tmp;
327 
328    return 0;
329 }
330 
331 bool
anv_gem_has_context_priority(int fd,int priority)332 anv_gem_has_context_priority(int fd, int priority)
333 {
334    return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
335                                      priority);
336 }
337 
338 int
anv_gem_create_context(struct anv_device * device)339 anv_gem_create_context(struct anv_device *device)
340 {
341    struct drm_i915_gem_context_create create = { 0 };
342 
343    int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
344    if (ret == -1)
345       return -1;
346 
347    return create.ctx_id;
348 }
349 
350 int
anv_gem_destroy_context(struct anv_device * device,int context)351 anv_gem_destroy_context(struct anv_device *device, int context)
352 {
353    struct drm_i915_gem_context_destroy destroy = {
354       .ctx_id = context,
355    };
356 
357    return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
358 }
359 
360 int
anv_gem_set_context_param(int fd,int context,uint32_t param,uint64_t value)361 anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
362 {
363    struct drm_i915_gem_context_param p = {
364       .ctx_id = context,
365       .param = param,
366       .value = value,
367    };
368    int err = 0;
369 
370    if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
371       err = -errno;
372    return err;
373 }
374 
375 int
anv_gem_context_get_reset_stats(int fd,int context,uint32_t * active,uint32_t * pending)376 anv_gem_context_get_reset_stats(int fd, int context,
377                                 uint32_t *active, uint32_t *pending)
378 {
379    struct drm_i915_reset_stats stats = {
380       .ctx_id = context,
381    };
382 
383    int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
384    if (ret == 0) {
385       *active = stats.batch_active;
386       *pending = stats.batch_pending;
387    }
388 
389    return ret;
390 }
391 
392 int
anv_gem_handle_to_fd(struct anv_device * device,uint32_t gem_handle)393 anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
394 {
395    struct drm_prime_handle args = {
396       .handle = gem_handle,
397       .flags = DRM_CLOEXEC | DRM_RDWR,
398    };
399 
400    int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
401    if (ret == -1)
402       return -1;
403 
404    return args.fd;
405 }
406 
407 uint32_t
anv_gem_fd_to_handle(struct anv_device * device,int fd)408 anv_gem_fd_to_handle(struct anv_device *device, int fd)
409 {
410    struct drm_prime_handle args = {
411       .fd = fd,
412    };
413 
414    int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
415    if (ret == -1)
416       return 0;
417 
418    return args.handle;
419 }
420 
/* Read a GPU register via DRM_IOCTL_I915_REG_READ.
 *
 * Returns the raw ioctl result (0 on success).  Note that *result is
 * written unconditionally: on failure it receives args.val, which is 0
 * since args is zero-initialized apart from .offset.
 */
int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}
433 
434 struct drm_i915_query_engine_info *
anv_gem_get_engine_info(int fd)435 anv_gem_get_engine_info(int fd)
436 {
437    return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO, NULL);
438 }
439