xref: /openbsd/sys/dev/pci/drm/drm_gem.c (revision f005ef32)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#include <sys/conf.h>
#include <uvm/uvm.h>

void drm_unref(struct uvm_object *);
void drm_ref(struct uvm_object *);
boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
    vm_fault_t, vm_prot_t, int);

const struct uvm_pagerops drm_pgops = {
	.pgo_reference = drm_ref,
	.pgo_detach = drm_unref,
	.pgo_fault = drm_fault,
	.pgo_flush = drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_get(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_put(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * We do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", INFSLP);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into the driver to do the magic. */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}
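
/*
 * For illustration, a sketch of the other side of the quiesce handshake
 * above (the real code lives in the suspend path; the exact shape here
 * is an assumption, not a quote): the suspender raises dev->quiesce and
 * then waits for in-flight faults to drain.
 *
 *	mtx_enter(&dev->quiesce_mtx);
 *	dev->quiesce = 1;
 *	while (dev->quiesce_count > 0) {
 *		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
 *		    PZERO, "drmqsc", INFSLP);
 *	}
 *	mtx_leave(&dev->quiesce_mtx);
 *
 * drm_fault() increments dev->quiesce_count around the driver call and
 * wakes the suspender on the way out, which is why its wakeup() channel
 * is &dev->quiesce_count rather than &dev->quiesce.
 */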

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	mutex_lock(&dev->filelist_mutex);
	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->filelist_mutex);
		return NULL;
	}
	filp = priv->filp;
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->mmap)
		return dev->driver->mmap(filp, accessprot, off, size);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

#ifdef __linux__

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	if (size > (512 * 1024 * 1024)) {
		printf("%s size too big %lu\n", __func__, size);
		return -ENOMEM;
	}

	obj->uao = uao_create(size, 0);
	uvm_obj_init(&obj->uobj, &drm_pgops, 1);

	return 0;
}

#endif
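
/*
 * Example (sketch, not part of this file): a minimal driver
 * object-creation path built on the helper above.  "foo_create_object"
 * and struct foo_obj are hypothetical names used for illustration.
 *
 *	struct foo_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_obj *
 *	foo_create_object(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_obj *fobj;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (fobj == NULL)
 *			return NULL;
 *
 *		size = round_page(size);
 *		if (drm_gem_object_init(dev, &fobj->base, size) != 0) {
 *			kfree(fobj);
 *			return NULL;
 *		}
 *		return fobj;
 *	}
 *
 * Objects that manage their own backing store (e.g. VRAM) would call
 * drm_gem_private_object_init() instead, as described below.
 */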

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __linux__
	obj->filp = NULL;
#else
	obj->uao = NULL;
	obj->uobj.pgops = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
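
/*
 * For context, a userspace sketch of the dumb-buffer flow that ends up
 * in drm_gem_dumb_map_offset(): create a dumb BO, ask for its fake mmap
 * offset, then mmap(2) the DRM fd at that offset.  Error handling is
 * omitted for brevity.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, map.offset);
 */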

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either an flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
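
/*
 * Example (sketch, hypothetical driver code): the tail of a typical
 * buffer-creation ioctl, reusing the hypothetical foo_create_object()
 * from the earlier sketch.  The handle reference taken by
 * drm_gem_handle_create() is what keeps the object alive for
 * userspace, so the creation path drops its own reference before
 * returning.
 *
 *	static int
 *	foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_obj *fobj;
 *		int ret;
 *
 *		fobj = foo_create_object(dev, args->size);
 *		if (fobj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_handle_create(file_priv, &fobj->base,
 *		    &args->handle);
 *		drm_gem_object_put(&fobj->base);
 *		return ret;
 *	}
 *
 * drm_gem_handle_create() must come last: it publishes the object to
 * userspace, so the object has to be fully set up before the call.
 */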


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

#ifdef notyet
/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct address_space *mapping;
	struct vm_page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;
		folio = shmem_read_folio_gfp(mapping, i,
				mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);
		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
#endif
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
		bool dirty, bool accessed)
{
	STUB();
#ifdef notyet
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
#endif
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 *
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;

}
EXPORT_SYMBOL(drm_gem_objects_lookup);
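
/*
 * Example (sketch, hypothetical driver code): an execbuf-style ioctl
 * resolving an array of handles passed in from userspace.  Every
 * object that was looked up holds a reference that must be dropped,
 * even when the lookup as a whole failed part way through.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *	    u64_to_user_ptr(args->bo_handles), args->bo_count, &objs);
 *	if (ret == 0) {
 *		... use objs[0..bo_count-1] ...
 *	}
 *	if (objs) {
 *		for (i = 0; i < args->bo_count; i++) {
 *			if (objs[i])
 *				drm_gem_object_put(objs[i]);
 *		}
 *		kvfree(objs);
 *	}
 */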

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation object's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out,
 * or 0 on success (note that the raw dma_resv_wait_timeout() result is
 * converted before being returned).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
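
/*
 * Example (sketch, hypothetical driver code): a BO wait ioctl built on
 * the helper above, converting a userspace timeout in nanoseconds to
 * jiffies first.
 *
 *	static int
 *	foo_gem_wait_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout =
 *		    drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *		    true, timeout);
 *	}
 */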

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}
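
/*
 * For context, a userspace sketch of sharing a buffer via flink/open
 * (the legacy global-name mechanism the two ioctls above implement).
 * Process A names the object; process B opens it by name to obtain its
 * own handle:
 *
 *	// Process A
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	// ... communicate flink.name to process B ...
 *
 *	// Process B
 *	struct drm_gem_open open_args = { .name = name_from_a };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are now valid
 *
 * Because names are global and hold no reference, dma-buf (PRIME) fds
 * are the preferred sharing mechanism on modern systems.
 */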

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifdef __linux__
	if (obj->filp)
		fput(obj->filp);
#else
	if (obj->uao)
		uao_detach(obj->uao);
	if (obj->uobj.pgops)
		uvm_obj_destroy(&obj->uobj);
#endif

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifdef __linux__
/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#else /* ! __linux__ */

int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     vm_prot_t accessprot, voff_t off, vsize_t size)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < size)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

#ifdef __linux__
	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;
#else
	if (obj->uobj.pgops == NULL)
		uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
#endif

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, accessprot, off, size);
		if (ret)
			goto err_drm_gem_object_put;
#ifdef notyet
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
#endif
	} else {
#ifdef notyet
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
#else
		ret = -EINVAL;
		goto err_drm_gem_object_put;
#endif
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

struct uvm_object *
drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
    vsize_t size)
{
	struct drm_file *priv = (void *)filp;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return NULL;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       accessprot, off, size);

	drm_gem_object_put(obj);

	/* On failure drm_gem_mmap_obj() dropped its mapping reference. */
	if (ret)
		return NULL;

	return &obj->uobj;
}

#endif /* __linux__ */

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
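
/*
 * Example (sketch): mapping an object into kernel address space with
 * the unlocked vmap helpers, doing a CPU copy, then unmapping.  The
 * object, source buffer, and length are assumed to exist already.
 *
 *	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, src, len);
 *	drm_gem_vunmap_unlocked(obj, &map);
 *
 * drm_gem_vunmap() clears the map, so the same struct iosys_map can be
 * reused safely for a later mapping.
 */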

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
								 acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
							    acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
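
/*
 * Example (sketch, hypothetical driver code): the lock/submit/unlock
 * pattern the helpers above are built for.  Deadlock handling between
 * concurrent submissions is done internally via the ww_acquire_ctx.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++)
 *		dma_resv_add_fence(objs[i]->resv, fence,
 *		    DMA_RESV_USAGE_WRITE);
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 *
 * Reserving the fence slots (dma_resv_reserve_fences()) would also
 * happen while the locks are held; it is omitted here for brevity.
 */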

/**
 * drm_gem_lru_init - initialize an LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct rwlock *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but the lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail.  Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
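
/*
 * Example (sketch, hypothetical driver code): a shrinker scan callback
 * built on drm_gem_lru_scan(), following the pattern used by drivers
 * such as msm.  "foo_purge" is the driver's callback that drops the
 * object's backing pages and moves it out of the purgeable LRU;
 * "foo_gem_purge_locked" is likewise hypothetical.
 *
 *	static bool
 *	foo_purge(struct drm_gem_object *obj)
 *	{
 *		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
 *			return false;	// still active, skip it
 *		foo_gem_purge_locked(obj);
 *		return true;
 *	}
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		struct foo_device *fdev = ...;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&fdev->lru_purgeable,
 *		    sc->nr_to_scan, &remaining, foo_purge);
 *
 *		return freed ? freed : SHRINK_STOP;
 *	}
 */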

/**
 * drm_gem_evict - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict);