xref: /dragonfly/sys/dev/drm/drm_gem.c (revision 5ca0a96d)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
/*-
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __DragonFly__
#include "opt_vm.h"
#endif

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

#ifdef __DragonFly__
struct drm_gem_mm {
	struct drm_mm offset_manager;	/**< Offset mgmt for buffer objects */
	struct drm_open_hash offset_hash; /**< User token hash table for maps */
	struct unrhdr *idxunr;
};
#endif

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
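
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12, on a
 * 64-bit platform): the fake offsets start at page 0x100000, which is a
 * byte offset of 4 GiB and therefore above anything a 32-bit file offset
 * could name, and span 0xFFFFF * 16 pages, giving roughly 64 GiB of
 * fake-offset space to hand out.
 */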

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;
	struct drm_vma_offset_manager *vma_offset_manager;

	lockinit(&dev->object_name_lock, "objnam", 0, LK_CANRECURSE);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!mm || !vma_offset_manager) {
		kfree(mm);
		kfree(vma_offset_manager);
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		kfree(vma_offset_manager);
		dev->mm_private = NULL;
		return -ENOMEM;
	}

	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
		    DRM_FILE_PAGE_OFFSET_SIZE);

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	delete_unrhdr(mm->idxunr);
	kfree(mm);
	dev->mm_private = NULL;

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct vm_object *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = default_pager_alloc(NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
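
/*
 * Usage sketch (illustrative, not compiled here): drivers normally embed
 * struct drm_gem_object in their own buffer-object structure and call
 * drm_gem_object_init() on the embedded base. The my_bo type and the
 * my_bo_create() helper below are hypothetical names.
 */
#if 0
struct my_bo {
	struct drm_gem_object base;
	/* driver-private state would follow */
};

static struct my_bo *
my_bo_create(struct drm_device *dev, size_t size)
{
	struct my_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return ERR_PTR(-ENOMEM);

	/* The size must be page-aligned; see the BUG_ON() in
	 * drm_gem_private_object_init(). */
	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}
	return bo;
}
#endif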

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	lockmgr(&filp->table_lock, LK_RELEASE);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	lockmgr(&filp->table_lock, LK_EXCLUSIVE);
	idr_remove(&filp->object_idr, handle);
	lockmgr(&filp->table_lock, LK_RELEASE);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
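
/*
 * Sketch of how a driver typically wires up the two helpers above
 * (hypothetical driver structure; only the dumb-buffer callbacks are
 * shown, and my_driver_dumb_create is a driver-supplied function):
 */
#if 0
static struct drm_driver my_driver = {
	.dumb_create	 = my_driver_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.dumb_destroy	 = drm_gem_dumb_destroy,
};
#endif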

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	lockmgr(&file_priv->table_lock, LK_EXCLUSIVE);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	lockmgr(&file_priv->table_lock, LK_RELEASE);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	lockmgr(&file_priv->table_lock, LK_EXCLUSIVE);
	idr_remove(&file_priv->object_idr, handle);
	lockmgr(&file_priv->table_lock, LK_RELEASE);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
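
/*
 * Usage sketch (hypothetical ioctl): a typical creation path allocates an
 * object, publishes a handle for it, and then drops its local reference,
 * leaving the handle as the only thing keeping the object alive.
 * my_bo_create() is the hypothetical helper sketched earlier, and
 * my_create_args is a made-up ioctl argument structure.
 */
#if 0
static int
my_create_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct my_create_args *args = data;	/* hypothetical args */
	struct my_bo *bo;
	int ret;

	bo = my_bo_create(dev, args->size);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
	/* Drop the creation reference: on success the handle now owns the
	 * object, on failure this frees it. */
	drm_gem_object_put_unlocked(&bo->base);
	return ret;
}
#endif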

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *list;

	if (!obj->on_map)
		return;
	list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, list);
	free_unr(mm->idxunr, list->key);
	obj->on_map = false;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	int ret = 0;

	if (obj->on_map)
		return (0);

	obj->map_list.key = alloc_unr(mm->idxunr);
	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
	if (ret != 0) {
		DRM_ERROR("failed to add to map hash\n");
		free_unr(mm->idxunr, obj->map_list.key);
		return (ret);
	}
	obj->on_map = true;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
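
/*
 * Userspace-side sketch of how the fake offset is consumed (illustrative;
 * drm_fd, bo_size and offset are assumed to come from opening the device
 * and from the driver's map-offset ioctl): the buffer is mapped with an
 * ordinary mmap(2) call on the DRM file descriptor.
 */
#if 0
	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, drm_fd, offset);
#endif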

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj)
		drm_gem_object_get(obj);

	lockmgr(&filp->table_lock, LK_RELEASE);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
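
/*
 * Usage sketch (hypothetical ioctl): every successful lookup returns a
 * reference that the caller must drop again once it is done with the
 * object. my_bo_info_args is a made-up argument structure.
 */
#if 0
static int
my_bo_info_ioctl(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct my_bo_info_args *args = data;	/* hypothetical args */
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	args->size = obj->size;

	/* Drop the reference taken by the lookup. */
	drm_gem_object_put_unlocked(obj);
	return 0;
}
#endif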

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
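
/*
 * Userspace-side sketch of the flink/open flow served by the two ioctls
 * above (illustrative; fd1 and fd2 are assumed to be open DRM file
 * descriptors and handle an existing GEM handle on fd1; error checking
 * omitted):
 */
#if 0
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open args = { 0 };

	/* Publish a global name for the object ... */
	ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink);

	/* ... and open the same object through that name on fd2. */
	args.name = flink.name;
	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &args);
	/* args.handle and args.size are now valid on fd2. */
#endif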

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	lockinit(&file_private->table_lock, "fptab", 0, 0);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	/*
	 * obj->filp can be NULL for private gem objects.
	 */
	vm_object_deallocate(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	might_lock(&dev->struct_mutex);

	if (dev->driver->gem_free_object_unlocked)
		kref_put(&obj->refcount, drm_gem_object_free);
	else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
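
/*
 * Sketch of typical driver wiring (my_gem_fault is a hypothetical,
 * driver-supplied page fault handler): with open/close pointed at the
 * helpers above, the object's reference count stays balanced across VMA
 * duplication from mremap(), fork() and partial unmaps.
 */
#if 0
static const struct vm_operations_struct my_gem_vm_ops = {
	.open  = drm_gem_vm_open,
	.close = drm_gem_vm_close,
	.fault = my_gem_fault,
};
#endif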

#if 0
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif

#ifdef __DragonFly__
static struct drm_gem_object *
drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
{
	struct drm_gem_object *obj;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *hash;

	if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
		return (NULL);
	offset &= ~DRM_GEM_MAPPING_KEY;

	if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
	    &hash) != 0) {
		return (NULL);
	}
	obj = container_of(hash, struct drm_gem_object, map_list);
	return (obj);
}

int
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_gem_object *gem_obj;
	struct vm_object *vm_obj;

	DRM_LOCK(dev);
	gem_obj = drm_gem_object_from_offset(dev, *offset);
	if (gem_obj == NULL) {
		DRM_UNLOCK(dev);
		return (ENODEV);
	}

	drm_gem_object_reference(gem_obj);
	DRM_UNLOCK(dev);
	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
	    dev->driver->gem_vm_ops, size, nprot,
	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
	if (vm_obj == NULL) {
		drm_gem_object_unreference_unlocked(gem_obj);
		return (EINVAL);
	}
	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
	*obj_res = vm_obj;
	return (0);
}
#endif