/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
/*-
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

#ifdef __DragonFly__
struct drm_gem_mm {
	struct drm_mm offset_manager;	/**< Offset mgmt for buffer objects */
	struct drm_open_hash offset_hash; /**< User token hash table for maps */
	struct unrhdr *idxunr;
};
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

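/*
 * Worked example (an illustration only, not used by the code): with
 * 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit kernel,
 * DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000 pages,
 * i.e. the first fake offset sits at the 4 GiB byte mark, just above
 * anything a 32-bit pgoff could produce, and DRM_FILE_PAGE_OFFSET_SIZE
 * reserves roughly 16 * 4 GiB = 64 GiB of offset space for buffer
 * objects.
 */
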
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;
	struct drm_vma_offset_manager *vma_offset_manager;

	lockinit(&dev->object_name_lock, "objnam", 0, LK_CANRECURSE);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!mm || !vma_offset_manager) {
		kfree(mm);
		kfree(vma_offset_manager);
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		kfree(vma_offset_manager);
		dev->mm_private = NULL;
		return -ENOMEM;
	}

	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
		    DRM_FILE_PAGE_OFFSET_SIZE);

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	delete_unrhdr(mm->idxunr);
	kfree(mm);
	dev->mm_private = NULL;

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct vm_object *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = default_pager_alloc(NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

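/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver typically embeds struct drm_gem_object in its own buffer type
 * and initializes it with a page-aligned size; "struct my_bo" and its
 * "base" member are assumed names.
 *
 *	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (bo == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *	if (ret) {
 *		kfree(bo);
 *		return ret;
 *	}
 */
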
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	lockmgr(&filp->table_lock, LK_RELEASE);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	lockmgr(&filp->table_lock, LK_EXCLUSIVE);
	idr_remove(&filp->object_idr, handle);
	lockmgr(&filp->table_lock, LK_RELEASE);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	lockmgr(&file_priv->table_lock, LK_EXCLUSIVE);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	lockmgr(&file_priv->table_lock, LK_RELEASE);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	lockmgr(&file_priv->table_lock, LK_EXCLUSIVE);
	idr_remove(&file_priv->object_idr, handle);
	lockmgr(&file_priv->table_lock, LK_RELEASE);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

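/*
 * Sketch of the usual create-then-drop pattern (hypothetical driver
 * code): the new handle holds its own reference, so the caller drops
 * its local reference once creation has been attempted, whether or not
 * it succeeded.
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_put_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */
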
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *list;

	if (!obj->on_map)
		return;
	list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, list);
	free_unr(mm->idxunr, list->key);
	obj->on_map = false;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	int ret = 0;

	if (obj->on_map)
		return (0);

	obj->map_list.key = alloc_unr(mm->idxunr);
	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
	if (ret != 0) {
		DRM_ERROR("failed to add to map hash\n");
		free_unr(mm->idxunr, obj->map_list.key);
		return (ret);
	}
	obj->on_map = true;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj)
		drm_gem_object_get(obj);

	lockmgr(&filp->table_lock, LK_RELEASE);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

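/*
 * Every successful lookup must be balanced with a put once the caller
 * is done with the object (sketch of a hypothetical ioctl body):
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_put_unlocked(obj);
 */
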
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

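/*
 * Userspace view of the flink/open pair (sketch; error handling
 * omitted): process A publishes a global name for one of its handles,
 * process B turns that name back into a handle of its own.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open open = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);
 *	// open.handle and open.size are now valid for fd_b
 */
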
/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	lockinit(&file_private->table_lock, "fptab", 0, 0);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	/*
	 * obj->filp can be NULL for private gem objects.
	 */
	vm_object_deallocate(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	might_lock(&dev->struct_mutex);

	if (dev->driver->gem_free_object_unlocked)
		kref_put(&obj->refcount, drm_gem_object_free);
	else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#if 0
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif

#ifdef __DragonFly__
static struct drm_gem_object *
drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
{
	struct drm_gem_object *obj;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *hash;

	if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
		return (NULL);
	offset &= ~DRM_GEM_MAPPING_KEY;

	if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
	    &hash) != 0) {
		return (NULL);
	}
	obj = container_of(hash, struct drm_gem_object, map_list);
	return (obj);
}

int
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_gem_object *gem_obj;
	struct vm_object *vm_obj;

	DRM_LOCK(dev);
	gem_obj = drm_gem_object_from_offset(dev, *offset);
	if (gem_obj == NULL) {
		DRM_UNLOCK(dev);
		return (ENODEV);
	}

	drm_gem_object_reference(gem_obj);
	DRM_UNLOCK(dev);
	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
	    dev->driver->gem_vm_ops, size, nprot,
	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
	if (vm_obj == NULL) {
		drm_gem_object_unreference_unlocked(gem_obj);
		return (EINVAL);
	}
	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
	*obj_res = vm_obj;
	return (0);
}
#endif