/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#include <sys/conf.h>
#include <uvm/uvm.h>

void drm_unref(struct uvm_object *);
void drm_ref(struct uvm_object *);
boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
    vm_fault_t, vm_prot_t, int);

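/*
 * UVM pager operations for GEM objects: mapping references translate
 * directly into GEM object references, and faults are forwarded to the
 * driver's gem_fault handler via drm_fault() below.
 */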
const struct uvm_pagerops drm_pgops = {
	.pgo_reference = drm_ref,
	.pgo_detach = drm_unref,
	.pgo_fault = drm_fault,
	.pgo_flush = drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_get(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_put_unlocked(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * We do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", INFSLP);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	mutex_lock(&dev->filelist_mutex);
	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->filelist_mutex);
		return NULL;
	}
	filp = priv->filp;
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->mmap)
		return dev->driver->mmap(filp, accessprot, off, size);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return NULL;
	}

	if (node->readonly) {
		if (accessprot & PROT_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return NULL;
		}
	}

	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls, as sketched below.  The objects themselves will still include the
 * struct file so that we can transition to fds if the required kernel
 * infrastructure shows up at a later date, and as our interface with shmfs
 * for memory allocation.
 */
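
/*
 * An illustrative userspace-side sketch of that handle flow (not part
 * of this file; error handling elided).  A buffer is named with flink,
 * opened by name on a second fd, and finally closed:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);	(handle returned in op.handle)
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(fd2, DRM_IOCTL_GEM_CLOSE, &cl);
 */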

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

#ifdef __linux__

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	obj->uao = uao_create(size, 0);
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	obj->filp = (void *)obj->uao;

	return 0;
}

#endif
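
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): real drivers usually embed struct drm_gem_object in a larger
 * driver-specific object, but the call sequence is the same.
 *
 *	struct drm_gem_object *obj;
 *	int ret;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, obj, round_page(size));
 *	if (ret) {
 *		kfree(obj);
 *		return ret;
 *	}
 */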

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
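
/*
 * Sketch of a typical caller (hypothetical prime-import path): the
 * importer provides its own backing storage, so only the private
 * variant is used and the backing is attached by the driver itself.
 *
 *	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
 *	obj->import_attach = attach;
 */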

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */
	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
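
/*
 * Sketch of the usual creation pattern (hypothetical dumb_create
 * implementation; names illustrative): publish the handle last, then
 * drop the creation reference, which the handle now holds.
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_put_unlocked(obj);
 *	return ret;
 */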

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
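
/*
 * Sketch (hypothetical driver code): the offset handed back to
 * userspace is the one to pass as the mmap(2) offset on the DRM fd.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */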

#ifdef notyet
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct address_space *mapping;
	struct vm_page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
#endif
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
		bool dirty, bool accessed)
{
	STUB();
#ifdef notyet
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
#endif
}
EXPORT_SYMBOL(drm_gem_put_pages);
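
/*
 * The two helpers pair up.  A sketch of the intended Linux-side usage
 * (hypothetical; both helpers are stubbed out on OpenBSD as seen above):
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access or DMA-map the pinned pages ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */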

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
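
/*
 * Sketch (hypothetical submit-style ioctl): resolve all handles in one
 * call, then drop the acquired references once the job is queued.
 *
 *	struct drm_gem_object **objs;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *	... queue the job referencing objs[0..bo_count-1] ...
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put_unlocked(objs[i]);
 *	kvfree(objs);
 */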

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
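
/*
 * Typical ioctl-side pattern (sketch, hypothetical): every successful
 * lookup takes a reference that the caller must drop again.
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_put_unlocked(obj);
 */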

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
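
/*
 * Sketch (hypothetical wait ioctl): convert a userspace timeout to
 * jiffies, then wait on every fence attached to the BO.
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return drm_gem_dma_resv_wait(file, args->handle, true, timeout);
 */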

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

#ifdef __OpenBSD__
	if (obj->uao)
		uao_detach(obj->uao);
#endif

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

#ifdef __linux__

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put_unlocked(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	vma->vm_private_data = obj;

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif /* __linux__ */

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
								 acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
							    acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
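
/*
 * Sketch of the submit-path bracketing described above (hypothetical
 * driver code; error handling elided):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	... reserve fence slots and install the job fences ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */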

#ifdef notyet
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);

#endif /* notyet */