xref: /openbsd/sys/dev/pci/drm/drm_gem.c (revision 09467b48)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/mm.h>
31 #include <linux/uaccess.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
37 #include <linux/shmem_fs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mem_encrypt.h>
40 #include <linux/pagevec.h>
41 
42 #include <drm/drm.h>
43 #include <drm/drm_device.h>
44 #include <drm/drm_drv.h>
45 #include <drm/drm_file.h>
46 #include <drm/drm_gem.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_vma_manager.h>
49 
50 #include "drm_internal.h"
51 
52 #include <sys/conf.h>
53 #include <uvm/uvm.h>
54 
55 void drm_unref(struct uvm_object *);
56 void drm_ref(struct uvm_object *);
57 boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
58 int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
59     vm_fault_t, vm_prot_t, int);
60 
61 struct uvm_pagerops drm_pgops = {
62 	NULL,			/* pgo_init */
63 	drm_ref,		/* pgo_reference */
64 	drm_unref,		/* pgo_detach */
65 	drm_fault,		/* pgo_fault */
66 	drm_flush,		/* pgo_flush */
67 };
68 
69 void
70 drm_ref(struct uvm_object *uobj)
71 {
72 	struct drm_gem_object *obj =
73 	    container_of(uobj, struct drm_gem_object, uobj);
74 
75 	drm_gem_object_get(obj);
76 }
77 
78 void
79 drm_unref(struct uvm_object *uobj)
80 {
81 	struct drm_gem_object *obj =
82 	    container_of(uobj, struct drm_gem_object, uobj);
83 
84 	drm_gem_object_put_unlocked(obj);
85 }
86 
87 int
88 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
89     int npages, int centeridx, vm_fault_t fault_type,
90     vm_prot_t access_type, int flags)
91 {
92 	struct vm_map_entry *entry = ufi->entry;
93 	struct uvm_object *uobj = entry->object.uvm_obj;
94 	struct drm_gem_object *obj =
95 	    container_of(uobj, struct drm_gem_object, uobj);
96 	struct drm_device *dev = obj->dev;
97 	int ret;
98 
99 	/*
100 	 * we do not allow device mappings to be mapped copy-on-write
101 	 * so we kill any attempt to do so here.
102 	 */
103 
104 	if (UVM_ET_ISCOPYONWRITE(entry)) {
105 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
106 		return (VM_PAGER_ERROR);
107 	}
108 
109 	/*
110 	 * We could end up here as the result of a copyin(9) or
111 	 * copyout(9) while handling an ioctl.  So we must be careful
112 	 * not to deadlock.  Therefore we only block if the quiesce
113 	 * count is zero, which guarantees we didn't enter from within
114 	 * an ioctl code path.
115 	 */
116 	mtx_enter(&dev->quiesce_mtx);
117 	if (dev->quiesce && dev->quiesce_count == 0) {
118 		mtx_leave(&dev->quiesce_mtx);
119 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
120 		mtx_enter(&dev->quiesce_mtx);
121 		while (dev->quiesce) {
122 			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
123 			    PZERO, "drmflt", INFSLP);
124 		}
125 		mtx_leave(&dev->quiesce_mtx);
126 		return (VM_PAGER_REFAULT);
127 	}
128 	dev->quiesce_count++;
129 	mtx_leave(&dev->quiesce_mtx);
130 
131 	/* Call down into driver to do the magic */
132 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
133 	    entry->start), vaddr, pps, npages, centeridx,
134 	    access_type, flags);
135 
136 	mtx_enter(&dev->quiesce_mtx);
137 	dev->quiesce_count--;
138 	if (dev->quiesce)
139 		wakeup(&dev->quiesce_count);
140 	mtx_leave(&dev->quiesce_mtx);
141 
142 	return (ret);
143 }
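
/*
 * For reference, the driver hook called above has to match the call site;
 * a minimal (hypothetical) implementation could look like the sketch
 * below.  The "foo_" name and the elided body are assumptions, and the
 * parameter types are inferred from the call in drm_fault() above.
 *
 *	int
 *	foo_gem_fault(struct drm_gem_object *obj, struct uvm_faultinfo *ufi,
 *	    voff_t offset, vaddr_t vaddr, vm_page_t *pps, int npages,
 *	    int centeridx, vm_prot_t access_type, int flags)
 *	{
 *		... look up or allocate the backing pages and enter
 *		    mappings for them ...
 *		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
 *		    &obj->uobj, NULL);
 *		return (VM_PAGER_OK);
 *	}
 */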
144 
145 boolean_t
146 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
147 {
148 	return (TRUE);
149 }
150 
151 struct uvm_object *
152 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
153 {
154 	struct drm_device *dev = drm_get_device_from_kdev(device);
155 	struct drm_gem_object *obj = NULL;
156 	struct drm_vma_offset_node *node;
157 	struct drm_file *priv;
158 	struct file *filp;
159 
160 	if (cdevsw[major(device)].d_mmap != drmmmap)
161 		return NULL;
162 
163 	if (dev == NULL)
164 		return NULL;
165 
166 	mutex_lock(&dev->filelist_mutex);
167 	priv = drm_find_file_by_minor(dev, minor(device));
168 	if (priv == NULL) {
169 		mutex_unlock(&dev->filelist_mutex);
170 		return NULL;
171 	}
172 	filp = priv->filp;
173 	mutex_unlock(&dev->filelist_mutex);
174 
175 	if (dev->driver->mmap)
176 		return dev->driver->mmap(filp, accessprot, off, size);
177 
178 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
179 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
180 						  off >> PAGE_SHIFT,
181 						  atop(round_page(size)));
182 	if (likely(node)) {
183 		obj = container_of(node, struct drm_gem_object, vma_node);
184 		/*
185 		 * When the object is being freed, after it hits 0-refcnt it
186 		 * proceeds to tear down the object. In the process it will
187 		 * attempt to remove the VMA offset and so acquire this
188 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
189 		 * that matches our range, we know it is in the process of being
190 		 * destroyed and will be freed as soon as we release the lock -
191 		 * so we have to check for the 0-refcnted object and treat it as
192 		 * invalid.
193 		 */
194 		if (!kref_get_unless_zero(&obj->refcount))
195 			obj = NULL;
196 	}
197 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
198 
199 	if (!obj)
200 		return NULL;
201 
202 	if (!drm_vma_node_is_allowed(node, filp)) {
203 		drm_gem_object_put_unlocked(obj);
204 		return NULL;
205 	}
206 
207 	if (node->readonly) {
208 		if (accessprot & PROT_WRITE) {
209 			drm_gem_object_put_unlocked(obj);
210 			return NULL;
211 		}
212 	}
213 
214 	return &obj->uobj;
215 }
216 
217 /** @file drm_gem.c
218  *
219  * This file provides some of the base ioctls and library routines for
220  * the graphics memory manager implemented by each device driver.
221  *
222  * Because various devices have different requirements in terms of
223  * synchronization and migration strategies, implementing that is left up to
224  * the driver, and all that the general API provides should be generic --
225  * allocating objects, reading/writing data with the cpu, freeing objects.
226  * Even there, platform-dependent optimizations for reading/writing data with
227  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
228  * the DRI2 implementation wants to have at least allocate/mmap be generic.
229  *
230  * The goal was to have swap-backed object allocation managed through
231  * struct file.  However, file descriptors as handles to a struct file have
232  * two major failings:
233  * - Process limits prevent more than 1024 or so being used at a time by
234  *   default.
235  * - Inability to allocate high fds will aggravate the X Server's select()
236  *   handling, and likely that of many GL client applications as well.
237  *
238  * This led to a plan of using our own integer IDs (called handles, following
239  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
240  * ioctls.  The objects themselves will still include the struct file so
241  * that we can transition to fds if the required kernel infrastructure shows
242  * up at a later date, and as our interface with shmfs for memory allocation.
243  */
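
/*
 * Illustrative sketch (not part of this file): the handle lifecycle
 * described above, as seen from a hypothetical userspace client.  The
 * request layouts are the standard DRM uAPI; error handling is elided.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 64, .height = 64, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	... create.handle now names the object on this open file ...
 *	struct drm_gem_close close = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
 */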
244 
245 /**
246  * drm_gem_init - Initialize the GEM device fields
247  * @dev: drm_device structure to initialize
248  */
249 int
250 drm_gem_init(struct drm_device *dev)
251 {
252 	struct drm_vma_offset_manager *vma_offset_manager;
253 
254 	rw_init(&dev->object_name_lock, "drmonl");
255 	idr_init_base(&dev->object_name_idr, 1);
256 
257 	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
258 	if (!vma_offset_manager) {
259 		DRM_ERROR("out of memory\n");
260 		return -ENOMEM;
261 	}
262 
263 	dev->vma_offset_manager = vma_offset_manager;
264 	drm_vma_offset_manager_init(vma_offset_manager,
265 				    DRM_FILE_PAGE_OFFSET_START,
266 				    DRM_FILE_PAGE_OFFSET_SIZE);
267 
268 	return 0;
269 }
270 
271 void
272 drm_gem_destroy(struct drm_device *dev)
273 {
274 
275 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
276 	kfree(dev->vma_offset_manager);
277 	dev->vma_offset_manager = NULL;
278 }
279 
280 #ifdef __linux__
281 
282 /**
283  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
284  * @dev: drm_device the object should be initialized for
285  * @obj: drm_gem_object to initialize
286  * @size: object size
287  *
288  * Initialize an already allocated GEM object of the specified size with
289  * shmfs backing store.
290  */
291 int drm_gem_object_init(struct drm_device *dev,
292 			struct drm_gem_object *obj, size_t size)
293 {
294 	struct file *filp;
295 
296 	drm_gem_private_object_init(dev, obj, size);
297 
298 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
299 	if (IS_ERR(filp))
300 		return PTR_ERR(filp);
301 
302 	obj->filp = filp;
303 
304 	return 0;
305 }
306 EXPORT_SYMBOL(drm_gem_object_init);
307 
308 #else
309 
310 int drm_gem_object_init(struct drm_device *dev,
311 			struct drm_gem_object *obj, size_t size)
312 {
313 	drm_gem_private_object_init(dev, obj, size);
314 
315 	obj->uao = uao_create(size, 0);
316 	uvm_objinit(&obj->uobj, &drm_pgops, 1);
317 
318 	obj->filp = (void *)obj->uao;
319 
320 	return 0;
321 }
322 
323 #endif
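
/*
 * A minimal sketch of driver-side object creation built on the helper
 * above (hypothetical; drivers normally embed the drm_gem_object in a
 * larger driver structure rather than allocating it bare):
 *
 *	struct drm_gem_object *obj;
 *	int ret;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj == NULL)
 *		return (-ENOMEM);
 *	ret = drm_gem_object_init(dev, obj, round_page(size));
 *	if (ret) {
 *		kfree(obj);
 *		return (ret);
 *	}
 */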
324 
325 /**
326  * drm_gem_private_object_init - initialize an allocated private GEM object
327  * @dev: drm_device the object should be initialized for
328  * @obj: drm_gem_object to initialize
329  * @size: object size
330  *
331  * Initialize an already allocated GEM object of the specified size with
332  * no GEM provided backing store. Instead the caller is responsible for
333  * backing the object and handling it.
334  */
335 void drm_gem_private_object_init(struct drm_device *dev,
336 				 struct drm_gem_object *obj, size_t size)
337 {
338 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
339 
340 	obj->dev = dev;
341 	obj->filp = NULL;
342 
343 	kref_init(&obj->refcount);
344 	obj->handle_count = 0;
345 	obj->size = size;
346 	dma_resv_init(&obj->_resv);
347 	if (!obj->resv)
348 		obj->resv = &obj->_resv;
349 
350 	drm_vma_node_reset(&obj->vma_node);
351 }
352 EXPORT_SYMBOL(drm_gem_private_object_init);
353 
354 static void
355 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
356 {
357 	/*
358 	 * Note: obj->dma_buf can't disappear as long as we still hold a
359 	 * handle reference in obj->handle_count.
360 	 */
361 	mutex_lock(&filp->prime.lock);
362 	if (obj->dma_buf) {
363 		drm_prime_remove_buf_handle_locked(&filp->prime,
364 						   obj->dma_buf);
365 	}
366 	mutex_unlock(&filp->prime.lock);
367 }
368 
369 /**
370  * drm_gem_object_handle_free - release resources bound to userspace handles
371  * @obj: GEM object to clean up.
372  *
373  * Called after the last handle to the object has been closed
374  *
375  * Removes any name for the object. Note that this must be
376  * called before drm_gem_object_free or we'll be touching
377  * freed memory
378  */
379 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
380 {
381 	struct drm_device *dev = obj->dev;
382 
383 	/* Remove any name for this object */
384 	if (obj->name) {
385 		idr_remove(&dev->object_name_idr, obj->name);
386 		obj->name = 0;
387 	}
388 }
389 
390 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
391 {
392 	/* Unbreak the reference cycle if we have an exported dma_buf. */
393 	if (obj->dma_buf) {
394 		dma_buf_put(obj->dma_buf);
395 		obj->dma_buf = NULL;
396 	}
397 }
398 
399 static void
400 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
401 {
402 	struct drm_device *dev = obj->dev;
403 	bool final = false;
404 
405 	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
406 		return;
407 
408 	/*
409 	 * Must bump handle count first as this may be the last
410 	 * ref, in which case the object would disappear before we
411 	 * checked for a name.
412 	 */
413 
414 	mutex_lock(&dev->object_name_lock);
415 	if (--obj->handle_count == 0) {
416 		drm_gem_object_handle_free(obj);
417 		drm_gem_object_exported_dma_buf_free(obj);
418 		final = true;
419 	}
420 	mutex_unlock(&dev->object_name_lock);
421 
422 	if (final)
423 		drm_gem_object_put_unlocked(obj);
424 }
425 
426 /*
427  * Called at device or object close to release the file's
428  * handle references on objects.
429  */
430 static int
431 drm_gem_object_release_handle(int id, void *ptr, void *data)
432 {
433 	struct drm_file *file_priv = data;
434 	struct drm_gem_object *obj = ptr;
435 	struct drm_device *dev = obj->dev;
436 
437 	if (obj->funcs && obj->funcs->close)
438 		obj->funcs->close(obj, file_priv);
439 	else if (dev->driver->gem_close_object)
440 		dev->driver->gem_close_object(obj, file_priv);
441 
442 	drm_gem_remove_prime_handles(obj, file_priv);
443 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
444 
445 	drm_gem_object_handle_put_unlocked(obj);
446 
447 	return 0;
448 }
449 
450 /**
451  * drm_gem_handle_delete - deletes the given file-private handle
452  * @filp: drm file-private structure to use for the handle look up
453  * @handle: userspace handle to delete
454  *
455  * Removes the GEM handle from the @filp lookup table which has been added with
456  * drm_gem_handle_create(). If this is the last handle also cleans up linked
457  * resources like GEM names.
458  */
459 int
460 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
461 {
462 	struct drm_gem_object *obj;
463 
464 	spin_lock(&filp->table_lock);
465 
466 	/* Check if we currently have a reference on the object */
467 	obj = idr_replace(&filp->object_idr, NULL, handle);
468 	spin_unlock(&filp->table_lock);
469 	if (IS_ERR_OR_NULL(obj))
470 		return -EINVAL;
471 
472 	/* Release driver's reference and decrement refcount. */
473 	drm_gem_object_release_handle(handle, obj, filp);
474 
475 	/* And finally make the handle available for future allocations. */
476 	spin_lock(&filp->table_lock);
477 	idr_remove(&filp->object_idr, handle);
478 	spin_unlock(&filp->table_lock);
479 
480 	return 0;
481 }
482 EXPORT_SYMBOL(drm_gem_handle_delete);
483 
484 /**
485  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
486  * @file: drm file-private structure containing the gem object
487  * @dev: corresponding drm_device
488  * @handle: gem object handle
489  * @offset: return location for the fake mmap offset
490  *
491  * This implements the &drm_driver.dumb_map_offset kms driver callback for
492  * drivers which use gem to manage their backing storage.
493  *
494  * Returns:
495  * 0 on success or a negative error code on failure.
496  */
497 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
498 			    u32 handle, u64 *offset)
499 {
500 	struct drm_gem_object *obj;
501 	int ret;
502 
503 	obj = drm_gem_object_lookup(file, handle);
504 	if (!obj)
505 		return -ENOENT;
506 
507 	/* Don't allow imported objects to be mapped */
508 	if (obj->import_attach) {
509 		ret = -EINVAL;
510 		goto out;
511 	}
512 
513 	ret = drm_gem_create_mmap_offset(obj);
514 	if (ret)
515 		goto out;
516 
517 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
518 out:
519 	drm_gem_object_put_unlocked(obj);
520 
521 	return ret;
522 }
523 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
524 
525 /**
526  * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
527  * @file: drm file-private structure to remove the dumb handle from
528  * @dev: corresponding drm_device
529  * @handle: the dumb handle to remove
530  *
531  * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
532  * which use gem to manage their backing storage.
533  */
534 int drm_gem_dumb_destroy(struct drm_file *file,
535 			 struct drm_device *dev,
536 			 uint32_t handle)
537 {
538 	return drm_gem_handle_delete(file, handle);
539 }
540 EXPORT_SYMBOL(drm_gem_dumb_destroy);
541 
542 /**
543  * drm_gem_handle_create_tail - internal function to create a handle
544  * @file_priv: drm file-private structure to register the handle for
545  * @obj: object to register
546  * @handlep: pointer to return the created handle to the caller
547  *
548  * This expects the &drm_device.object_name_lock to be held already and will
549  * drop it before returning. Used to avoid races in establishing new handles
550  * when importing an object from either an flink name or a dma-buf.
551  *
552  * Handles must be release again through drm_gem_handle_delete(). This is done
553  * when userspace closes @file_priv for all attached handles, or through the
554  * GEM_CLOSE ioctl for individual handles.
555  */
556 int
557 drm_gem_handle_create_tail(struct drm_file *file_priv,
558 			   struct drm_gem_object *obj,
559 			   u32 *handlep)
560 {
561 	struct drm_device *dev = obj->dev;
562 	u32 handle;
563 	int ret;
564 
565 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
566 	if (obj->handle_count++ == 0)
567 		drm_gem_object_get(obj);
568 
569 	/*
570 	 * Get the user-visible handle using idr.  Preload and perform
571 	 * allocation under our spinlock.
572 	 */
573 	idr_preload(GFP_KERNEL);
574 	spin_lock(&file_priv->table_lock);
575 
576 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
577 
578 	spin_unlock(&file_priv->table_lock);
579 	idr_preload_end();
580 
581 	mutex_unlock(&dev->object_name_lock);
582 	if (ret < 0)
583 		goto err_unref;
584 
585 	handle = ret;
586 
587 	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
588 	if (ret)
589 		goto err_remove;
590 
591 	if (obj->funcs && obj->funcs->open) {
592 		ret = obj->funcs->open(obj, file_priv);
593 		if (ret)
594 			goto err_revoke;
595 	} else if (dev->driver->gem_open_object) {
596 		ret = dev->driver->gem_open_object(obj, file_priv);
597 		if (ret)
598 			goto err_revoke;
599 	}
600 
601 	*handlep = handle;
602 	return 0;
603 
604 err_revoke:
605 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
606 err_remove:
607 	spin_lock(&file_priv->table_lock);
608 	idr_remove(&file_priv->object_idr, handle);
609 	spin_unlock(&file_priv->table_lock);
610 err_unref:
611 	drm_gem_object_handle_put_unlocked(obj);
612 	return ret;
613 }
614 
615 /**
616  * drm_gem_handle_create - create a gem handle for an object
617  * @file_priv: drm file-private structure to register the handle for
618  * @obj: object to register
619  * @handlep: pointer to return the created handle to the caller
620  *
621  * Create a handle for this object. This adds a handle reference to the object,
622  * which includes a regular reference count. Callers will likely want to
623  * dereference the object afterwards.
624  *
625  * Since this publishes @obj to userspace it must be fully set up by this point;
626  * drivers must call this last in their buffer object creation callbacks.
627  */
628 int drm_gem_handle_create(struct drm_file *file_priv,
629 			  struct drm_gem_object *obj,
630 			  u32 *handlep)
631 {
632 	mutex_lock(&obj->dev->object_name_lock);
633 
634 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
635 }
636 EXPORT_SYMBOL(drm_gem_handle_create);
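
/*
 * Usage sketch (hypothetical driver dumb_create callback): the handle
 * keeps its own reference, so the local reference obtained at object
 * creation is dropped unconditionally afterwards.
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_put_unlocked(obj);
 *	return (ret);
 */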
637 
638 
639 /**
640  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
641  * @obj: obj in question
642  *
643  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
644  *
645  * Note that drm_gem_object_release() already calls this function, so drivers
646  * don't have to take care of releasing the mmap offset themselves when freeing
647  * the GEM object.
648  */
649 void
650 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
651 {
652 	struct drm_device *dev = obj->dev;
653 
654 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
655 }
656 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
657 
658 /**
659  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
660  * @obj: obj in question
661  * @size: the virtual size
662  *
663  * GEM memory mapping works by handing back to userspace a fake mmap offset
664  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
665  * up the object based on the offset and sets up the various memory mapping
666  * structures.
667  *
668  * This routine allocates and attaches a fake offset for @obj, in cases where
669  * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
670  * Otherwise just use drm_gem_create_mmap_offset().
671  *
672  * This function is idempotent and handles an already allocated mmap offset
673  * transparently. Drivers do not need to check for this case.
674  */
675 int
676 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
677 {
678 	struct drm_device *dev = obj->dev;
679 
680 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
681 				  size / PAGE_SIZE);
682 }
683 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
684 
685 /**
686  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
687  * @obj: obj in question
688  *
689  * GEM memory mapping works by handing back to userspace a fake mmap offset
690  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
691  * up the object based on the offset and sets up the various memory mapping
692  * structures.
693  *
694  * This routine allocates and attaches a fake offset for @obj.
695  *
696  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
697  * the fake offset again.
698  */
699 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
700 {
701 	return drm_gem_create_mmap_offset_size(obj, obj->size);
702 }
703 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
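
/*
 * The fake offset handed back by drm_vma_node_offset_addr() is what
 * userspace then passes to mmap(2), e.g. (illustrative):
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, map.offset);
 */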
704 
705 #ifdef notyet
706 /*
707  * Move pages to appropriate lru and release the pagevec, decrementing the
708  * ref count of those pages.
709  */
710 static void drm_gem_check_release_pagevec(struct pagevec *pvec)
711 {
712 	check_move_unevictable_pages(pvec);
713 	__pagevec_release(pvec);
714 	cond_resched();
715 }
716 #endif
717 
718 /**
719  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
720  * from shmem
721  * @obj: obj in question
722  *
723  * This reads the page-array of the shmem-backing storage of the given gem
724  * object. An array of pages is returned. If a page is not allocated or
725  * swapped-out, this will allocate/swap-in the required pages. Note that the
726  * whole object is covered by the page-array and pinned in memory.
727  *
728  * Use drm_gem_put_pages() to release the array and unpin all pages.
729  *
730  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
731  * If you require other GFP-masks, you have to do those allocations yourself.
732  *
733  * Note that you are not allowed to change gfp-zones during runtime. That is,
734  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
735  * set during initialization. If you have special zone constraints, set them
736  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
737  * to keep pages in the required zone during swap-in.
738  */
739 struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
740 {
741 	STUB();
742 	return ERR_PTR(-ENOSYS);
743 #ifdef notyet
744 	struct address_space *mapping;
745 	struct vm_page *p, **pages;
746 	struct pagevec pvec;
747 	int i, npages;
748 
749 	/* This is the shared memory object that backs the GEM resource */
750 	mapping = obj->filp->f_mapping;
751 
752 	/* We already BUG_ON() for non-page-aligned sizes in
753 	 * drm_gem_object_init(), so we should never hit this unless
754 	 * driver author is doing something really wrong:
755 	 */
756 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
757 
758 	npages = obj->size >> PAGE_SHIFT;
759 
760 	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
761 	if (pages == NULL)
762 		return ERR_PTR(-ENOMEM);
763 
764 	mapping_set_unevictable(mapping);
765 
766 	for (i = 0; i < npages; i++) {
767 		p = shmem_read_mapping_page(mapping, i);
768 		if (IS_ERR(p))
769 			goto fail;
770 		pages[i] = p;
771 
772 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
773 		 * correct region during swapin. Note that this requires
774 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
775 		 * so shmem can relocate pages during swapin if required.
776 		 */
777 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
778 				(page_to_pfn(p) >= 0x00100000UL));
779 	}
780 
781 	return pages;
782 
783 fail:
784 	mapping_clear_unevictable(mapping);
785 	pagevec_init(&pvec);
786 	while (i--) {
787 		if (!pagevec_add(&pvec, pages[i]))
788 			drm_gem_check_release_pagevec(&pvec);
789 	}
790 	if (pagevec_count(&pvec))
791 		drm_gem_check_release_pagevec(&pvec);
792 
793 	kvfree(pages);
794 	return ERR_CAST(p);
795 #endif
796 }
797 EXPORT_SYMBOL(drm_gem_get_pages);
798 
799 /**
800  * drm_gem_put_pages - helper to free backing pages for a GEM object
801  * @obj: obj in question
802  * @pages: pages to free
803  * @dirty: if true, pages will be marked as dirty
804  * @accessed: if true, the pages will be marked as accessed
805  */
806 void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
807 		bool dirty, bool accessed)
808 {
809 	STUB();
810 #ifdef notyet
811 	int i, npages;
812 	struct address_space *mapping;
813 	struct pagevec pvec;
814 
815 	mapping = file_inode(obj->filp)->i_mapping;
816 	mapping_clear_unevictable(mapping);
817 
818 	/* We already BUG_ON() for non-page-aligned sizes in
819 	 * drm_gem_object_init(), so we should never hit this unless
820 	 * driver author is doing something really wrong:
821 	 */
822 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
823 
824 	npages = obj->size >> PAGE_SHIFT;
825 
826 	pagevec_init(&pvec);
827 	for (i = 0; i < npages; i++) {
828 		if (!pages[i])
829 			continue;
830 
831 		if (dirty)
832 			set_page_dirty(pages[i]);
833 
834 		if (accessed)
835 			mark_page_accessed(pages[i]);
836 
837 		/* Undo the reference we took when populating the table */
838 		if (!pagevec_add(&pvec, pages[i]))
839 			drm_gem_check_release_pagevec(&pvec);
840 	}
841 	if (pagevec_count(&pvec))
842 		drm_gem_check_release_pagevec(&pvec);
843 
844 	kvfree(pages);
845 #endif
846 }
847 EXPORT_SYMBOL(drm_gem_put_pages);
848 
849 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
850 			  struct drm_gem_object **objs)
851 {
852 	int i, ret = 0;
853 	struct drm_gem_object *obj;
854 
855 	spin_lock(&filp->table_lock);
856 
857 	for (i = 0; i < count; i++) {
858 		/* Check if we currently have a reference on the object */
859 		obj = idr_find(&filp->object_idr, handle[i]);
860 		if (!obj) {
861 			ret = -ENOENT;
862 			break;
863 		}
864 		drm_gem_object_get(obj);
865 		objs[i] = obj;
866 	}
867 	spin_unlock(&filp->table_lock);
868 
869 	return ret;
870 }
871 
872 /**
873  * drm_gem_objects_lookup - look up GEM objects from an array of handles
874  * @filp: DRM file private data
875  * @bo_handles: user pointer to array of userspace handles
876  * @count: size of handle array
877  * @objs_out: returned pointer to array of drm_gem_object pointers
878  *
879  * Takes an array of userspace handles and returns a newly allocated array of
880  * GEM objects.
881  *
882  * For a single handle lookup, use drm_gem_object_lookup().
883  *
884  * Returns:
885  *
886  * @objs filled in with GEM object pointers. Returned GEM objects need to be
887  * released with drm_gem_object_put(). -ENOENT is returned on a lookup
888  * failure. 0 is returned on success.
889  *
890  */
891 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
892 			   int count, struct drm_gem_object ***objs_out)
893 {
894 	int ret;
895 	u32 *handles;
896 	struct drm_gem_object **objs;
897 
898 	if (!count)
899 		return 0;
900 
901 	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
902 			     GFP_KERNEL | __GFP_ZERO);
903 	if (!objs)
904 		return -ENOMEM;
905 
906 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
907 	if (!handles) {
908 		ret = -ENOMEM;
909 		goto out;
910 	}
911 
912 	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
913 		ret = -EFAULT;
914 		DRM_DEBUG("Failed to copy in GEM handles\n");
915 		goto out;
916 	}
917 
918 	ret = objects_lookup(filp, handles, count, objs);
919 	*objs_out = objs;
920 
921 out:
922 	kvfree(handles);
923 	return ret;
924 
925 }
926 EXPORT_SYMBOL(drm_gem_objects_lookup);
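
/*
 * Usage sketch (hypothetical submit ioctl; the "args" layout and the
 * u64_to_user_ptr() helper are assumptions):
 *
 *	struct drm_gem_object **bos;
 *	int ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *	    u64_to_user_ptr(args->bo_handles), args->bo_count, &bos);
 *	if (ret)
 *		return (ret);
 *	... use bos[0 .. bo_count-1], then drop each reference with
 *	    drm_gem_object_put_unlocked() and kvfree(bos) ...
 */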
927 
928 /**
929  * drm_gem_object_lookup - look up a GEM object from its handle
930  * @filp: DRM file private data
931  * @handle: userspace handle
932  *
933  * Returns:
934  *
935  * A reference to the object named by the handle if such exists on @filp, NULL
936  * otherwise.
937  *
938  * If looking up an array of handles, use drm_gem_objects_lookup().
939  */
940 struct drm_gem_object *
941 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
942 {
943 	struct drm_gem_object *obj = NULL;
944 
945 	objects_lookup(filp, &handle, 1, &obj);
946 	return obj;
947 }
948 EXPORT_SYMBOL(drm_gem_object_lookup);
949 
950 /**
951  * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
952  * shared and/or exclusive fences.
953  * @filep: DRM file private data
954  * @handle: userspace handle
955  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
956  * @timeout: timeout value in jiffies or zero to return immediately
957  *
958  * Returns:
959  *
960  * -ERESTARTSYS if interrupted, 0 if the wait timed out, or
961  * greater than 0 on success.
962  */
963 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
964 				    bool wait_all, unsigned long timeout)
965 {
966 	long ret;
967 	struct drm_gem_object *obj;
968 
969 	obj = drm_gem_object_lookup(filep, handle);
970 	if (!obj) {
971 		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
972 		return -EINVAL;
973 	}
974 
975 	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
976 						  true, timeout);
977 	if (ret == 0)
978 		ret = -ETIME;
979 	else if (ret > 0)
980 		ret = 0;
981 
982 	drm_gem_object_put_unlocked(obj);
983 
984 	return ret;
985 }
986 EXPORT_SYMBOL(drm_gem_dma_resv_wait);
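
/*
 * Usage sketch (hypothetical driver wait ioctl; the "args" layout and
 * the drm_timeout_abs_to_jiffies() conversion are assumptions):
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return (drm_gem_dma_resv_wait(file_priv, args->handle,
 *	    true, timeout));
 */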
987 
988 /**
989  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
990  * @dev: drm_device
991  * @data: ioctl data
992  * @file_priv: drm file-private structure
993  *
994  * Releases the handle to an mm object.
995  */
996 int
997 drm_gem_close_ioctl(struct drm_device *dev, void *data,
998 		    struct drm_file *file_priv)
999 {
1000 	struct drm_gem_close *args = data;
1001 	int ret;
1002 
1003 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1004 		return -EOPNOTSUPP;
1005 
1006 	ret = drm_gem_handle_delete(file_priv, args->handle);
1007 
1008 	return ret;
1009 }
1010 
1011 /**
1012  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
1013  * @dev: drm_device
1014  * @data: ioctl data
1015  * @file_priv: drm file-private structure
1016  *
1017  * Create a global name for an object, returning the name.
1018  *
1019  * Note that the name does not hold a reference; when the object
1020  * is freed, the name goes away.
1021  */
1022 int
1023 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1024 		    struct drm_file *file_priv)
1025 {
1026 	struct drm_gem_flink *args = data;
1027 	struct drm_gem_object *obj;
1028 	int ret;
1029 
1030 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1031 		return -EOPNOTSUPP;
1032 
1033 	obj = drm_gem_object_lookup(file_priv, args->handle);
1034 	if (obj == NULL)
1035 		return -ENOENT;
1036 
1037 	mutex_lock(&dev->object_name_lock);
1038 	/* prevent races with concurrent gem_close. */
1039 	if (obj->handle_count == 0) {
1040 		ret = -ENOENT;
1041 		goto err;
1042 	}
1043 
1044 	if (!obj->name) {
1045 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
1046 		if (ret < 0)
1047 			goto err;
1048 
1049 		obj->name = ret;
1050 	}
1051 
1052 	args->name = (uint64_t) obj->name;
1053 	ret = 0;
1054 
1055 err:
1056 	mutex_unlock(&dev->object_name_lock);
1057 	drm_gem_object_put_unlocked(obj);
1058 	return ret;
1059 }
1060 
1061 /**
1062  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
1063  * @dev: drm_device
1064  * @data: ioctl data
1065  * @file_priv: drm file-private structure
1066  *
1067  * Open an object using the global name, returning a handle and the size.
1068  *
1069  * This handle (of course) holds a reference to the object, so the object
1070  * will not go away until the handle is deleted.
1071  */
1072 int
1073 drm_gem_open_ioctl(struct drm_device *dev, void *data,
1074 		   struct drm_file *file_priv)
1075 {
1076 	struct drm_gem_open *args = data;
1077 	struct drm_gem_object *obj;
1078 	int ret;
1079 	u32 handle;
1080 
1081 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1082 		return -EOPNOTSUPP;
1083 
1084 	mutex_lock(&dev->object_name_lock);
1085 	obj = idr_find(&dev->object_name_idr, (int) args->name);
1086 	if (obj) {
1087 		drm_gem_object_get(obj);
1088 	} else {
1089 		mutex_unlock(&dev->object_name_lock);
1090 		return -ENOENT;
1091 	}
1092 
1093 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1094 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1095 	drm_gem_object_put_unlocked(obj);
1096 	if (ret)
1097 		return ret;
1098 
1099 	args->handle = handle;
1100 	args->size = obj->size;
1101 
1102 	return 0;
1103 }
1104 
1105 /**
1106  * drm_gem_open - initializes GEM file-private structures at devnode open time
1107  * @dev: drm_device which is being opened by userspace
1108  * @file_private: drm file-private structure to set up
1109  *
1110  * Called at device open time, sets up the structure for handling refcounting
1111  * of mm objects.
1112  */
1113 void
1114 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1115 {
1116 	idr_init_base(&file_private->object_idr, 1);
1117 	mtx_init(&file_private->table_lock, IPL_NONE);
1118 }
1119 
1120 /**
1121  * drm_gem_release - release file-private GEM resources
1122  * @dev: drm_device which is being closed by userspace
1123  * @file_private: drm file-private structure to clean up
1124  *
1125  * Called at close time when the filp is going away.
1126  *
1127  * Releases any remaining references on objects by this filp.
1128  */
1129 void
1130 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1131 {
1132 	idr_for_each(&file_private->object_idr,
1133 		     &drm_gem_object_release_handle, file_private);
1134 	idr_destroy(&file_private->object_idr);
1135 }
1136 
1137 /**
1138  * drm_gem_object_release - release GEM buffer object resources
1139  * @obj: GEM buffer object
1140  *
1141  * This releases any structures and resources used by @obj and is the inverse of
1142  * drm_gem_object_init().
1143  */
1144 void
1145 drm_gem_object_release(struct drm_gem_object *obj)
1146 {
1147 	WARN_ON(obj->dma_buf);
1148 
1149 	if (obj->filp)
1150 		fput(obj->filp);
1151 
1152 #ifdef __OpenBSD__
1153 	if (obj->uao)
1154 		uao_detach(obj->uao);
1155 #endif
1156 
1157 	dma_resv_fini(&obj->_resv);
1158 	drm_gem_free_mmap_offset(obj);
1159 }
1160 EXPORT_SYMBOL(drm_gem_object_release);
1161 
1162 /**
1163  * drm_gem_object_free - free a GEM object
1164  * @kref: kref of the object to free
1165  *
1166  * Called after the last reference to the object has been lost.
1167  * Must be called holding &drm_device.struct_mutex.
1168  *
1169  * Frees the object
1170  */
1171 void
1172 drm_gem_object_free(struct kref *kref)
1173 {
1174 	struct drm_gem_object *obj =
1175 		container_of(kref, struct drm_gem_object, refcount);
1176 	struct drm_device *dev = obj->dev;
1177 
1178 	if (obj->funcs) {
1179 		obj->funcs->free(obj);
1180 	} else if (dev->driver->gem_free_object_unlocked) {
1181 		dev->driver->gem_free_object_unlocked(obj);
1182 	} else if (dev->driver->gem_free_object) {
1183 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1184 
1185 		dev->driver->gem_free_object(obj);
1186 	}
1187 }
1188 EXPORT_SYMBOL(drm_gem_object_free);
1189 
1190 /**
1191  * drm_gem_object_put_unlocked - drop a GEM buffer object reference
1192  * @obj: GEM buffer object
1193  *
1194  * This releases a reference to @obj. Callers must not hold the
1195  * &drm_device.struct_mutex lock when calling this function.
1196  *
1197  * See also __drm_gem_object_put().
1198  */
1199 void
1200 drm_gem_object_put_unlocked(struct drm_gem_object *obj)
1201 {
1202 	struct drm_device *dev;
1203 
1204 	if (!obj)
1205 		return;
1206 
1207 	dev = obj->dev;
1208 
1209 	if (dev->driver->gem_free_object) {
1210 		might_lock(&dev->struct_mutex);
1211 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
1212 				&dev->struct_mutex))
1213 			mutex_unlock(&dev->struct_mutex);
1214 	} else {
1215 		kref_put(&obj->refcount, drm_gem_object_free);
1216 	}
1217 }
1218 EXPORT_SYMBOL(drm_gem_object_put_unlocked);
1219 
1220 /**
1221  * drm_gem_object_put - release a GEM buffer object reference
1222  * @obj: GEM buffer object
1223  *
1224  * This releases a reference to @obj. Callers must hold the
1225  * &drm_device.struct_mutex lock when calling this function, even when the
1226  * driver doesn't use &drm_device.struct_mutex for anything.
1227  *
1228  * For drivers not encumbered with legacy locking use
1229  * drm_gem_object_put_unlocked() instead.
1230  */
1231 void
1232 drm_gem_object_put(struct drm_gem_object *obj)
1233 {
1234 	if (obj) {
1235 		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
1236 
1237 		kref_put(&obj->refcount, drm_gem_object_free);
1238 	}
1239 }
1240 EXPORT_SYMBOL(drm_gem_object_put);
1241 
1242 #ifdef __linux__
1243 
1244 /**
1245  * drm_gem_vm_open - vma->ops->open implementation for GEM
1246  * @vma: VM area structure
1247  *
1248  * This function implements the #vm_operations_struct open() callback for GEM
1249  * drivers. This must be used together with drm_gem_vm_close().
1250  */
1251 void drm_gem_vm_open(struct vm_area_struct *vma)
1252 {
1253 	struct drm_gem_object *obj = vma->vm_private_data;
1254 
1255 	drm_gem_object_get(obj);
1256 }
1257 EXPORT_SYMBOL(drm_gem_vm_open);
1258 
1259 /**
1260  * drm_gem_vm_close - vma->ops->close implementation for GEM
1261  * @vma: VM area structure
1262  *
1263  * This function implements the #vm_operations_struct close() callback for GEM
1264  * drivers. This must be used together with drm_gem_vm_open().
1265  */
1266 void drm_gem_vm_close(struct vm_area_struct *vma)
1267 {
1268 	struct drm_gem_object *obj = vma->vm_private_data;
1269 
1270 	drm_gem_object_put_unlocked(obj);
1271 }
1272 EXPORT_SYMBOL(drm_gem_vm_close);
1273 
1274 /**
1275  * drm_gem_mmap_obj - memory map a GEM object
1276  * @obj: the GEM object to map
1277  * @obj_size: the object size to be mapped, in bytes
1278  * @vma: VMA for the area to be mapped
1279  *
1280  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
1281  * provided by the driver. Depending on their requirements, drivers can either
1282  * provide a fault handler in their gem_vm_ops (in which case any accesses to
1283  * the object will be trapped, to perform migration, GTT binding, surface
1284  * register allocation, or performance monitoring), or mmap the buffer memory
1285  * synchronously after calling drm_gem_mmap_obj.
1286  *
1287  * This function is mainly intended to implement the DMABUF mmap operation, when
1288  * the GEM object is not looked up based on its fake offset. To implement the
1289  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1290  *
1291  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1292  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1293  * callers must verify access restrictions before calling this helper.
1294  *
1295  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1296  * size, or if no gem_vm_ops are provided.
1297  */
1298 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1299 		     struct vm_area_struct *vma)
1300 {
1301 	struct drm_device *dev = obj->dev;
1302 	int ret;
1303 
1304 	/* Check for valid size. */
1305 	if (obj_size < vma->vm_end - vma->vm_start)
1306 		return -EINVAL;
1307 
1308 	/* Take a ref for this mapping of the object, so that the fault
1309 	 * handler can dereference the mmap offset's pointer to the object.
1310 	 * This reference is cleaned up by the corresponding vm_close
1311 	 * (which should happen whether the vma was created by this call, or
1312 	 * by a vm_open due to mremap or partial unmap or whatever).
1313 	 */
1314 	drm_gem_object_get(obj);
1315 
1316 	if (obj->funcs && obj->funcs->mmap) {
1317 		ret = obj->funcs->mmap(obj, vma);
1318 		if (ret) {
1319 			drm_gem_object_put_unlocked(obj);
1320 			return ret;
1321 		}
1322 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1323 	} else {
1324 		if (obj->funcs && obj->funcs->vm_ops)
1325 			vma->vm_ops = obj->funcs->vm_ops;
1326 		else if (dev->driver->gem_vm_ops)
1327 			vma->vm_ops = dev->driver->gem_vm_ops;
1328 		else {
1329 			drm_gem_object_put_unlocked(obj);
1330 			return -EINVAL;
1331 		}
1332 
1333 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1334 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1335 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1336 	}
1337 
1338 	vma->vm_private_data = obj;
1339 
1340 	return 0;
1341 }
1342 EXPORT_SYMBOL(drm_gem_mmap_obj);
1343 
1344 /**
1345  * drm_gem_mmap - memory map routine for GEM objects
1346  * @filp: DRM file pointer
1347  * @vma: VMA for the area to be mapped
1348  *
1349  * If a driver supports GEM object mapping, mmap calls on the DRM file
1350  * descriptor will end up here.
1351  *
1352  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1353  * contain the fake offset we created when the GTT map ioctl was called on
1354  * the object) and map it with a call to drm_gem_mmap_obj().
1355  *
1356  * If the caller is not granted access to the buffer object, the mmap will fail
1357  * with EACCES. Please see the vma manager for more information.
1358  */
1359 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1360 {
1361 	struct drm_file *priv = filp->private_data;
1362 	struct drm_device *dev = priv->minor->dev;
1363 	struct drm_gem_object *obj = NULL;
1364 	struct drm_vma_offset_node *node;
1365 	int ret;
1366 
1367 	if (drm_dev_is_unplugged(dev))
1368 		return -ENODEV;
1369 
1370 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1371 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1372 						  vma->vm_pgoff,
1373 						  vma_pages(vma));
1374 	if (likely(node)) {
1375 		obj = container_of(node, struct drm_gem_object, vma_node);
1376 		/*
1377 		 * When the object is being freed, after it hits 0-refcnt it
1378 		 * proceeds to tear down the object. In the process it will
1379 		 * attempt to remove the VMA offset and so acquire this
1380 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1381 		 * that matches our range, we know it is in the process of being
1382 		 * destroyed and will be freed as soon as we release the lock -
1383 		 * so we have to check for the 0-refcnted object and treat it as
1384 		 * invalid.
1385 		 */
1386 		if (!kref_get_unless_zero(&obj->refcount))
1387 			obj = NULL;
1388 	}
1389 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1390 
1391 	if (!obj)
1392 		return -EINVAL;
1393 
1394 	if (!drm_vma_node_is_allowed(node, priv)) {
1395 		drm_gem_object_put_unlocked(obj);
1396 		return -EACCES;
1397 	}
1398 
1399 	if (node->readonly) {
1400 		if (vma->vm_flags & VM_WRITE) {
1401 			drm_gem_object_put_unlocked(obj);
1402 			return -EINVAL;
1403 		}
1404 
1405 		vma->vm_flags &= ~VM_MAYWRITE;
1406 	}
1407 
1408 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1409 			       vma);
1410 
1411 	drm_gem_object_put_unlocked(obj);
1412 
1413 	return ret;
1414 }
1415 EXPORT_SYMBOL(drm_gem_mmap);
1416 
1417 #endif /* __linux__ */
1418 
1419 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1420 			const struct drm_gem_object *obj)
1421 {
1422 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1423 	drm_printf_indent(p, indent, "refcount=%u\n",
1424 			  kref_read(&obj->refcount));
1425 	drm_printf_indent(p, indent, "start=%08lx\n",
1426 			  drm_vma_node_start(&obj->vma_node));
1427 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1428 	drm_printf_indent(p, indent, "imported=%s\n",
1429 			  obj->import_attach ? "yes" : "no");
1430 
1431 	if (obj->funcs && obj->funcs->print_info)
1432 		obj->funcs->print_info(p, indent, obj);
1433 	else if (obj->dev->driver->gem_print_info)
1434 		obj->dev->driver->gem_print_info(p, indent, obj);
1435 }
1436 
1437 int drm_gem_pin(struct drm_gem_object *obj)
1438 {
1439 	if (obj->funcs && obj->funcs->pin)
1440 		return obj->funcs->pin(obj);
1441 	else if (obj->dev->driver->gem_prime_pin)
1442 		return obj->dev->driver->gem_prime_pin(obj);
1443 	else
1444 		return 0;
1445 }
1446 
1447 void drm_gem_unpin(struct drm_gem_object *obj)
1448 {
1449 	if (obj->funcs && obj->funcs->unpin)
1450 		obj->funcs->unpin(obj);
1451 	else if (obj->dev->driver->gem_prime_unpin)
1452 		obj->dev->driver->gem_prime_unpin(obj);
1453 }
1454 
1455 void *drm_gem_vmap(struct drm_gem_object *obj)
1456 {
1457 	void *vaddr;
1458 
1459 	if (obj->funcs && obj->funcs->vmap)
1460 		vaddr = obj->funcs->vmap(obj);
1461 	else if (obj->dev->driver->gem_prime_vmap)
1462 		vaddr = obj->dev->driver->gem_prime_vmap(obj);
1463 	else
1464 		vaddr = ERR_PTR(-EOPNOTSUPP);
1465 
1466 	if (!vaddr)
1467 		vaddr = ERR_PTR(-ENOMEM);
1468 
1469 	return vaddr;
1470 }
1471 
1472 void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
1473 {
1474 	if (!vaddr)
1475 		return;
1476 
1477 	if (obj->funcs && obj->funcs->vunmap)
1478 		obj->funcs->vunmap(obj, vaddr);
1479 	else if (obj->dev->driver->gem_prime_vunmap)
1480 		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
1481 }
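
/*
 * Typical pairing of the two helpers above (illustrative):
 *
 *	void *vaddr = drm_gem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return (PTR_ERR(vaddr));
 *	... CPU access through vaddr ...
 *	drm_gem_vunmap(obj, vaddr);
 */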
1482 
1483 /**
1484  * drm_gem_lock_reservations - Sets up the ww context and acquires
1485  * the lock on an array of GEM objects.
1486  *
1487  * Once you've locked your reservations, you'll want to set up space
1488  * for your shared fences (if applicable), submit your job, then
1489  * drm_gem_unlock_reservations().
1490  *
1491  * @objs: drm_gem_objects to lock
1492  * @count: Number of objects in @objs
1493  * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1494  * part of tracking this set of locked reservations.
1495  */
1496 int
1497 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1498 			  struct ww_acquire_ctx *acquire_ctx)
1499 {
1500 	int contended = -1;
1501 	int i, ret;
1502 
1503 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
1504 
1505 retry:
1506 	if (contended != -1) {
1507 		struct drm_gem_object *obj = objs[contended];
1508 
1509 		ret = dma_resv_lock_slow_interruptible(obj->resv,
1510 								 acquire_ctx);
1511 		if (ret) {
1512 			ww_acquire_done(acquire_ctx);
1513 			return ret;
1514 		}
1515 	}
1516 
1517 	for (i = 0; i < count; i++) {
1518 		if (i == contended)
1519 			continue;
1520 
1521 		ret = dma_resv_lock_interruptible(objs[i]->resv,
1522 							    acquire_ctx);
1523 		if (ret) {
1524 			int j;
1525 
1526 			for (j = 0; j < i; j++)
1527 				dma_resv_unlock(objs[j]->resv);
1528 
1529 			if (contended != -1 && contended >= i)
1530 				dma_resv_unlock(objs[contended]->resv);
1531 
1532 			if (ret == -EDEADLK) {
1533 				contended = i;
1534 				goto retry;
1535 			}
1536 
1537 			ww_acquire_done(acquire_ctx);
1538 			return ret;
1539 		}
1540 	}
1541 
1542 	ww_acquire_done(acquire_ctx);
1543 
1544 	return 0;
1545 }
1546 EXPORT_SYMBOL(drm_gem_lock_reservations);
1547 
1548 void
1549 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1550 			    struct ww_acquire_ctx *acquire_ctx)
1551 {
1552 	int i;
1553 
1554 	for (i = 0; i < count; i++)
1555 		dma_resv_unlock(objs[i]->resv);
1556 
1557 	ww_acquire_fini(acquire_ctx);
1558 }
1559 EXPORT_SYMBOL(drm_gem_unlock_reservations);
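
/*
 * Usage sketch following the kernel-doc above (hypothetical job
 * submission; "bos" and "bo_count" are assumptions):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
 *	if (ret)
 *		return (ret);
 *	... reserve fence slots, push the job, install its fences ...
 *	drm_gem_unlock_reservations(bos, bo_count, &ctx);
 */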
1560 
1561 #ifdef notyet
1562 /**
1563  * drm_gem_fence_array_add - Adds the fence to an array of fences to be
1564  * waited on, deduplicating fences from the same context.
1565  *
1566  * @fence_array: array of dma_fence * for the job to block on.
1567  * @fence: the dma_fence to add to the list of dependencies.
1568  *
1569  * Returns:
1570  * 0 on success, or an error on failing to expand the array.
1571  */
1572 int drm_gem_fence_array_add(struct xarray *fence_array,
1573 			    struct dma_fence *fence)
1574 {
1575 	struct dma_fence *entry;
1576 	unsigned long index;
1577 	u32 id = 0;
1578 	int ret;
1579 
1580 	if (!fence)
1581 		return 0;
1582 
1583 	/* Deduplicate if we already depend on a fence from the same context.
1584 	 * This lets the size of the array of deps scale with the number of
1585 	 * engines involved, rather than the number of BOs.
1586 	 */
1587 	xa_for_each(fence_array, index, entry) {
1588 		if (entry->context != fence->context)
1589 			continue;
1590 
1591 		if (dma_fence_is_later(fence, entry)) {
1592 			dma_fence_put(entry);
1593 			xa_store(fence_array, index, fence, GFP_KERNEL);
1594 		} else {
1595 			dma_fence_put(fence);
1596 		}
1597 		return 0;
1598 	}
1599 
1600 	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
1601 	if (ret != 0)
1602 		dma_fence_put(fence);
1603 
1604 	return ret;
1605 }
1606 EXPORT_SYMBOL(drm_gem_fence_array_add);
1607 
1608 /**
1609  * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
1610  * in the GEM object's reservation object to an array of dma_fences for use in
1611  * scheduling a rendering job.
1612  *
1613  * This should be called after drm_gem_lock_reservations() on your array of
1614  * GEM objects used in the job but before updating the reservations with your
1615  * own fences.
1616  *
1617  * @fence_array: array of dma_fence * for the job to block on.
1618  * @obj: the gem object to add new dependencies from.
1619  * @write: whether the job might write the object (so we need to depend on
1620  * shared fences in the reservation object).
1621  */
1622 int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
1623 				     struct drm_gem_object *obj,
1624 				     bool write)
1625 {
1626 	int ret;
1627 	struct dma_fence **fences;
1628 	unsigned int i, fence_count;
1629 
1630 	if (!write) {
1631 		struct dma_fence *fence =
1632 			dma_resv_get_excl_rcu(obj->resv);
1633 
1634 		return drm_gem_fence_array_add(fence_array, fence);
1635 	}
1636 
1637 	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
1638 						&fence_count, &fences);
1639 	if (ret || !fence_count)
1640 		return ret;
1641 
1642 	for (i = 0; i < fence_count; i++) {
1643 		ret = drm_gem_fence_array_add(fence_array, fences[i]);
1644 		if (ret)
1645 			break;
1646 	}
1647 
1648 	for (; i < fence_count; i++)
1649 		dma_fence_put(fences[i]);
1650 	kfree(fences);
1651 	return ret;
1652 }
1653 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
1654 
1655 #endif /* notyet */
1656