xref: /openbsd/sys/dev/pci/drm/drm_gem.c (revision f46a341e)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/dma-buf.h>
29 #include <linux/file.h>
30 #include <linux/fs.h>
31 #include <linux/iosys-map.h>
32 #include <linux/mem_encrypt.h>
33 #include <linux/mm.h>
34 #include <linux/mman.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h>
37 #include <linux/pagevec.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/string_helpers.h>
41 #include <linux/types.h>
42 #include <linux/uaccess.h>
43 
44 #include <drm/drm.h>
45 #include <drm/drm_device.h>
46 #include <drm/drm_drv.h>
47 #include <drm/drm_file.h>
48 #include <drm/drm_gem.h>
49 #include <drm/drm_managed.h>
50 #include <drm/drm_print.h>
51 #include <drm/drm_vma_manager.h>
52 
53 #include "drm_internal.h"
54 
55 #include <sys/conf.h>
56 #include <uvm/uvm.h>
57 
58 void drm_unref(struct uvm_object *);
59 void drm_ref(struct uvm_object *);
60 boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
61 int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
62     vm_fault_t, vm_prot_t, int);
63 
64 const struct uvm_pagerops drm_pgops = {
65 	.pgo_reference = drm_ref,
66 	.pgo_detach = drm_unref,
67 	.pgo_fault = drm_fault,
68 	.pgo_flush = drm_flush,
69 };
70 
71 void
72 drm_ref(struct uvm_object *uobj)
73 {
74 	struct drm_gem_object *obj =
75 	    container_of(uobj, struct drm_gem_object, uobj);
76 
77 	drm_gem_object_get(obj);
78 }
79 
80 void
81 drm_unref(struct uvm_object *uobj)
82 {
83 	struct drm_gem_object *obj =
84 	    container_of(uobj, struct drm_gem_object, uobj);
85 
86 	drm_gem_object_put(obj);
87 }
88 
89 int
90 drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
91     int npages, int centeridx, vm_fault_t fault_type,
92     vm_prot_t access_type, int flags)
93 {
94 	struct vm_map_entry *entry = ufi->entry;
95 	struct uvm_object *uobj = entry->object.uvm_obj;
96 	struct drm_gem_object *obj =
97 	    container_of(uobj, struct drm_gem_object, uobj);
98 	struct drm_device *dev = obj->dev;
99 	int ret;
100 
101 	/*
102 	 * we do not allow device mappings to be mapped copy-on-write
103 	 * so we kill any attempt to do so here.
104 	 */
105 	if (UVM_ET_ISCOPYONWRITE(entry)) {
106 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
107 		return EACCES;
108 	}
109 
110 	/*
111 	 * We could end up here as the result of a copyin(9) or
112 	 * copyout(9) while handling an ioctl.  So we must be careful
113 	 * not to deadlock.  Therefore we only block if the quiesce
114 	 * count is zero, which guarantees we didn't enter from within
115 	 * an ioctl code path.
116 	 */
117 	mtx_enter(&dev->quiesce_mtx);
118 	if (dev->quiesce && dev->quiesce_count == 0) {
119 		mtx_leave(&dev->quiesce_mtx);
120 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
121 		mtx_enter(&dev->quiesce_mtx);
122 		while (dev->quiesce) {
123 			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
124 			    PZERO, "drmflt", INFSLP);
125 		}
126 		mtx_leave(&dev->quiesce_mtx);
127 		return ERESTART;
128 	}
129 	dev->quiesce_count++;
130 	mtx_leave(&dev->quiesce_mtx);
131 
132 	/* Call down into driver to do the magic */
133 	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
134 	    entry->start), vaddr, pps, npages, centeridx,
135 	    access_type, flags);
136 
137 	mtx_enter(&dev->quiesce_mtx);
138 	dev->quiesce_count--;
139 	if (dev->quiesce)
140 		wakeup(&dev->quiesce_count);
141 	mtx_leave(&dev->quiesce_mtx);
142 
143 	return ret;
144 }
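
/*
 * Illustrative counterpart (a sketch, not code from this file): the
 * quiesce handshake above pairs with suspend-side code that raises
 * dev->quiesce and then waits for in-flight faults to drain, roughly:
 *
 *	mtx_enter(&dev->quiesce_mtx);
 *	dev->quiesce = 1;
 *	while (dev->quiesce_count > 0)
 *		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
 *		    PZERO, "drmqsc", INFSLP);
 *	mtx_leave(&dev->quiesce_mtx);
 *
 * This is why drm_fault() wakes &dev->quiesce_count when it finishes the
 * last in-flight fault while dev->quiesce is set.  The wait channel name
 * "drmqsc" is an assumption for the example.
 */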
145 
146 boolean_t
147 drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
148 {
149 	return (TRUE);
150 }
151 
152 struct uvm_object *
153 udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
154 {
155 	struct drm_device *dev = drm_get_device_from_kdev(device);
156 	struct drm_gem_object *obj = NULL;
157 	struct drm_vma_offset_node *node;
158 	struct drm_file *priv;
159 	struct file *filp;
160 
161 	if (cdevsw[major(device)].d_mmap != drmmmap)
162 		return NULL;
163 
164 	if (dev == NULL)
165 		return NULL;
166 
167 	mutex_lock(&dev->filelist_mutex);
168 	priv = drm_find_file_by_minor(dev, minor(device));
169 	if (priv == NULL) {
170 		mutex_unlock(&dev->filelist_mutex);
171 		return NULL;
172 	}
173 	filp = priv->filp;
174 	mutex_unlock(&dev->filelist_mutex);
175 
176 	if (dev->driver->mmap)
177 		return dev->driver->mmap(filp, accessprot, off, size);
178 
179 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
180 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
181 						  off >> PAGE_SHIFT,
182 						  atop(round_page(size)));
183 	if (likely(node)) {
184 		obj = container_of(node, struct drm_gem_object, vma_node);
185 		/*
186 		 * When the object is being freed, after it hits 0-refcnt it
187 		 * proceeds to tear down the object. In the process it will
188 		 * attempt to remove the VMA offset and so acquire this
189 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
190 		 * that matches our range, we know it is in the process of being
191 		 * destroyed and will be freed as soon as we release the lock -
192 		 * so we have to check for the 0-refcnted object and treat it as
193 		 * invalid.
194 		 */
195 		if (!kref_get_unless_zero(&obj->refcount))
196 			obj = NULL;
197 	}
198 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
199 
200 	if (!obj)
201 		return NULL;
202 
203 	if (!drm_vma_node_is_allowed(node, priv)) {
204 		drm_gem_object_put(obj);
205 		return NULL;
206 	}
207 
208 	return &obj->uobj;
209 }
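
/*
 * Example path into udv_attach_drm() (an illustrative sketch): userspace
 * maps a GEM object through the DRM device node using the fake offset
 * handed back by a driver map-offset ioctl, and the uvm device pager
 * lands here:
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    drm_fd, (off_t)fake_offset);
 *
 * drm_vma_offset_exact_lookup_locked() then resolves fake_offset back to
 * the drm_gem_object whose uvm_object is returned.
 */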
210 
211 /** @file drm_gem.c
212  *
213  * This file provides some of the base ioctls and library routines for
214  * the graphics memory manager implemented by each device driver.
215  *
216  * Because various devices have different requirements in terms of
217  * synchronization and migration strategies, implementing that is left up to
218  * the driver, and all that the general API provides should be generic --
219  * allocating objects, reading/writing data with the CPU, freeing objects.
220  * Even there, platform-dependent optimizations for reading/writing data with
221  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
222  * the DRI2 implementation wants to have at least allocate/mmap be generic.
223  *
224  * The goal was to have swap-backed object allocation managed through
225  * struct file.  However, file descriptors as handles to a struct file have
226  * two major failings:
227  * - Process limits prevent more than 1024 or so being used at a time by
228  *   default.
229  * - Inability to allocate high fds will aggravate the X Server's select()
230  *   handling, and likely that of many GL client applications as well.
231  *
232  * This led to a plan of using our own integer IDs (called handles, following
233  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
234  * ioctls.  The objects themselves will still include the struct file so
235  * that we can transition to fds if the required kernel infrastructure shows
236  * up at a later date, and as our interface with shmfs for memory allocation.
237  */
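
/*
 * Userspace view of the handle scheme described above (an illustrative
 * sketch using libdrm; obtaining drm_fd and handle via a driver-specific
 * create ioctl is assumed):
 *
 *	struct drm_gem_close close_args = { .handle = handle };
 *	drmIoctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 *
 * GEM_CLOSE drops the per-file handle; the object itself persists until
 * its last reference (other handles, mmaps, kernel users) is gone.
 */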
238 
239 static void
240 drm_gem_init_release(struct drm_device *dev, void *ptr)
241 {
242 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
243 }
244 
245 /**
246  * drm_gem_init - Initialize the GEM device fields
247  * @dev: drm_device structure to initialize
248  */
249 int
250 drm_gem_init(struct drm_device *dev)
251 {
252 	struct drm_vma_offset_manager *vma_offset_manager;
253 
254 	rw_init(&dev->object_name_lock, "drmonl");
255 	idr_init_base(&dev->object_name_idr, 1);
256 
257 	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
258 					  GFP_KERNEL);
259 	if (!vma_offset_manager) {
260 		DRM_ERROR("out of memory\n");
261 		return -ENOMEM;
262 	}
263 
264 	dev->vma_offset_manager = vma_offset_manager;
265 	drm_vma_offset_manager_init(vma_offset_manager,
266 				    DRM_FILE_PAGE_OFFSET_START,
267 				    DRM_FILE_PAGE_OFFSET_SIZE);
268 
269 	return drmm_add_action(dev, drm_gem_init_release, NULL);
270 }
271 
272 #ifdef __linux__
273 
274 /**
275  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
276  * @dev: drm_device the object should be initialized for
277  * @obj: drm_gem_object to initialize
278  * @size: object size
279  *
280  * Initialize an already allocated GEM object of the specified size with
281  * shmfs backing store.
282  */
283 int drm_gem_object_init(struct drm_device *dev,
284 			struct drm_gem_object *obj, size_t size)
285 {
286 	struct file *filp;
287 
288 	drm_gem_private_object_init(dev, obj, size);
289 
290 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
291 	if (IS_ERR(filp))
292 		return PTR_ERR(filp);
293 
294 	obj->filp = filp;
295 
296 	return 0;
297 }
298 EXPORT_SYMBOL(drm_gem_object_init);
299 
300 #else
301 
302 int drm_gem_object_init(struct drm_device *dev,
303 			struct drm_gem_object *obj, size_t size)
304 {
305 	drm_gem_private_object_init(dev, obj, size);
306 
307 	if (size > (512 * 1024 * 1024)) {
308 		printf("%s size too big %lu\n", __func__, size);
309 		return -ENOMEM;
310 	}
311 
312 	obj->uao = uao_create(size, 0);
313 	uvm_obj_init(&obj->uobj, &drm_pgops, 1);
314 
315 	return 0;
316 }
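
/*
 * Typical driver-side usage (a sketch; my_obj and the surrounding ioctl
 * are hypothetical):
 *
 *	struct my_obj *bo = malloc(sizeof(*bo), M_DRM, M_WAITOK | M_ZERO);
 *	int error = drm_gem_object_init(dev, &bo->base, round_page(size));
 *	if (error) {
 *		free(bo, M_DRM, sizeof(*bo));
 *		return error;
 *	}
 *	error = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_put(&bo->base);	(the handle now owns a reference)
 */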
317 
318 #endif
319 
320 /**
321  * drm_gem_private_object_init - initialize an allocated private GEM object
322  * @dev: drm_device the object should be initialized for
323  * @obj: drm_gem_object to initialize
324  * @size: object size
325  *
326  * Initialize an already allocated GEM object of the specified size with
327  * no GEM provided backing store. Instead the caller is responsible for
328  * backing the object and handling it.
329  */
330 void drm_gem_private_object_init(struct drm_device *dev,
331 				 struct drm_gem_object *obj, size_t size)
332 {
333 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
334 
335 	obj->dev = dev;
336 #ifdef __linux__
337 	obj->filp = NULL;
338 #else
339 	obj->uao = NULL;
340 	obj->uobj.pgops = NULL;
341 #endif
342 
343 	kref_init(&obj->refcount);
344 	obj->handle_count = 0;
345 	obj->size = size;
346 	dma_resv_init(&obj->_resv);
347 	if (!obj->resv)
348 		obj->resv = &obj->_resv;
349 
350 	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
351 		drm_gem_gpuva_init(obj);
352 
353 	drm_vma_node_reset(&obj->vma_node);
354 	INIT_LIST_HEAD(&obj->lru_node);
355 }
356 EXPORT_SYMBOL(drm_gem_private_object_init);
357 
358 /**
359  * drm_gem_private_object_fini - Finalize a failed drm_gem_object
360  * @obj: drm_gem_object
361  *
362  * Uninitialize an already allocated GEM object when its initialization failed.
363  */
364 void drm_gem_private_object_fini(struct drm_gem_object *obj)
365 {
366 	WARN_ON(obj->dma_buf);
367 
368 	dma_resv_fini(&obj->_resv);
369 }
370 EXPORT_SYMBOL(drm_gem_private_object_fini);
371 
372 /**
373  * drm_gem_object_handle_free - release resources bound to userspace handles
374  * @obj: GEM object to clean up.
375  *
376  * Called after the last handle to the object has been closed.
377  *
378  * Removes any name for the object. Note that this must be
379  * called before drm_gem_object_free or we'll be touching
380  * freed memory
381  */
382 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
383 {
384 	struct drm_device *dev = obj->dev;
385 
386 	/* Remove any name for this object */
387 	if (obj->name) {
388 		idr_remove(&dev->object_name_idr, obj->name);
389 		obj->name = 0;
390 	}
391 }
392 
393 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
394 {
395 	/* Unbreak the reference cycle if we have an exported dma_buf. */
396 	if (obj->dma_buf) {
397 		dma_buf_put(obj->dma_buf);
398 		obj->dma_buf = NULL;
399 	}
400 }
401 
402 static void
403 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
404 {
405 	struct drm_device *dev = obj->dev;
406 	bool final = false;
407 
408 	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
409 		return;
410 
411 	/*
412 	 * Must bump handle count first as this may be the last
413 	 * ref, in which case the object would disappear before we
414 	 * checked for a name
415 	 */
416 
417 	mutex_lock(&dev->object_name_lock);
418 	if (--obj->handle_count == 0) {
419 		drm_gem_object_handle_free(obj);
420 		drm_gem_object_exported_dma_buf_free(obj);
421 		final = true;
422 	}
423 	mutex_unlock(&dev->object_name_lock);
424 
425 	if (final)
426 		drm_gem_object_put(obj);
427 }
428 
429 /*
430  * Called at device or object close to release the file's
431  * handle references on objects.
432  */
433 static int
434 drm_gem_object_release_handle(int id, void *ptr, void *data)
435 {
436 	struct drm_file *file_priv = data;
437 	struct drm_gem_object *obj = ptr;
438 
439 	if (obj->funcs->close)
440 		obj->funcs->close(obj, file_priv);
441 
442 	drm_prime_remove_buf_handle(&file_priv->prime, id);
443 	drm_vma_node_revoke(&obj->vma_node, file_priv);
444 
445 	drm_gem_object_handle_put_unlocked(obj);
446 
447 	return 0;
448 }
449 
450 /**
451  * drm_gem_handle_delete - deletes the given file-private handle
452  * @filp: drm file-private structure to use for the handle look up
453  * @handle: userspace handle to delete
454  *
455  * Removes the GEM handle from the @filp lookup table which has been added with
456  * drm_gem_handle_create(). If this is the last handle also cleans up linked
457  * resources like GEM names.
458  */
459 int
460 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
461 {
462 	struct drm_gem_object *obj;
463 
464 	spin_lock(&filp->table_lock);
465 
466 	/* Check if we currently have a reference on the object */
467 	obj = idr_replace(&filp->object_idr, NULL, handle);
468 	spin_unlock(&filp->table_lock);
469 	if (IS_ERR_OR_NULL(obj))
470 		return -EINVAL;
471 
472 	/* Release driver's reference and decrement refcount. */
473 	drm_gem_object_release_handle(handle, obj, filp);
474 
475 	/* And finally make the handle available for future allocations. */
476 	spin_lock(&filp->table_lock);
477 	idr_remove(&filp->object_idr, handle);
478 	spin_unlock(&filp->table_lock);
479 
480 	return 0;
481 }
482 EXPORT_SYMBOL(drm_gem_handle_delete);
483 
484 /**
485  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
486  * @file: drm file-private structure containing the gem object
487  * @dev: corresponding drm_device
488  * @handle: gem object handle
489  * @offset: return location for the fake mmap offset
490  *
491  * This implements the &drm_driver.dumb_map_offset kms driver callback for
492  * drivers which use gem to manage their backing storage.
493  *
494  * Returns:
495  * 0 on success or a negative error code on failure.
496  */
497 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
498 			    u32 handle, u64 *offset)
499 {
500 	struct drm_gem_object *obj;
501 	int ret;
502 
503 	obj = drm_gem_object_lookup(file, handle);
504 	if (!obj)
505 		return -ENOENT;
506 
507 	/* Don't allow imported objects to be mapped */
508 	if (obj->import_attach) {
509 		ret = -EINVAL;
510 		goto out;
511 	}
512 
513 	ret = drm_gem_create_mmap_offset(obj);
514 	if (ret)
515 		goto out;
516 
517 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
518 out:
519 	drm_gem_object_put(obj);
520 
521 	return ret;
522 }
523 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
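
/*
 * Userspace consumes the fake offset produced here via the standard
 * dumb-buffer flow (an illustrative sketch):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *fb = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, map.offset);
 */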
524 
525 /**
526  * drm_gem_handle_create_tail - internal function to create a handle
527  * @file_priv: drm file-private structure to register the handle for
528  * @obj: object to register
529  * @handlep: pointer to return the created handle to the caller
530  *
531  * This expects the &drm_device.object_name_lock to be held already and will
532  * drop it before returning. Used to avoid races in establishing new handles
533  * when importing an object from either a flink name or a dma-buf.
534  *
535  * Handles must be released again through drm_gem_handle_delete(). This is done
536  * when userspace closes @file_priv for all attached handles, or through the
537  * GEM_CLOSE ioctl for individual handles.
538  */
539 int
540 drm_gem_handle_create_tail(struct drm_file *file_priv,
541 			   struct drm_gem_object *obj,
542 			   u32 *handlep)
543 {
544 	struct drm_device *dev = obj->dev;
545 	u32 handle;
546 	int ret;
547 
548 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
549 	if (obj->handle_count++ == 0)
550 		drm_gem_object_get(obj);
551 
552 	/*
553 	 * Get the user-visible handle using idr.  Preload and perform
554 	 * allocation under our spinlock.
555 	 */
556 	idr_preload(GFP_KERNEL);
557 	spin_lock(&file_priv->table_lock);
558 
559 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
560 
561 	spin_unlock(&file_priv->table_lock);
562 	idr_preload_end();
563 
564 	mutex_unlock(&dev->object_name_lock);
565 	if (ret < 0)
566 		goto err_unref;
567 
568 	handle = ret;
569 
570 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
571 	if (ret)
572 		goto err_remove;
573 
574 	if (obj->funcs->open) {
575 		ret = obj->funcs->open(obj, file_priv);
576 		if (ret)
577 			goto err_revoke;
578 	}
579 
580 	*handlep = handle;
581 	return 0;
582 
583 err_revoke:
584 	drm_vma_node_revoke(&obj->vma_node, file_priv);
585 err_remove:
586 	spin_lock(&file_priv->table_lock);
587 	idr_remove(&file_priv->object_idr, handle);
588 	spin_unlock(&file_priv->table_lock);
589 err_unref:
590 	drm_gem_object_handle_put_unlocked(obj);
591 	return ret;
592 }
593 
594 /**
595  * drm_gem_handle_create - create a gem handle for an object
596  * @file_priv: drm file-private structure to register the handle for
597  * @obj: object to register
598  * @handlep: pointer to return the created handle to the caller
599  *
600  * Create a handle for this object. This adds a handle reference to the object,
601  * which includes a regular reference count. Callers will likely want to
602  * dereference the object afterwards.
603  *
604  * Since this publishes @obj to userspace it must be fully set up by this point;
605  * drivers must call this last in their buffer object creation callbacks.
606  */
607 int drm_gem_handle_create(struct drm_file *file_priv,
608 			  struct drm_gem_object *obj,
609 			  u32 *handlep)
610 {
611 	mutex_lock(&obj->dev->object_name_lock);
612 
613 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
614 }
615 EXPORT_SYMBOL(drm_gem_handle_create);
616 
617 
618 /**
619  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
620  * @obj: obj in question
621  *
622  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
623  *
624  * Note that drm_gem_object_release() already calls this function, so drivers
625  * don't have to take care of releasing the mmap offset themselves when freeing
626  * the GEM object.
627  */
628 void
629 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
630 {
631 	struct drm_device *dev = obj->dev;
632 
633 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
634 }
635 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
636 
637 /**
638  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
639  * @obj: obj in question
640  * @size: the virtual size
641  *
642  * GEM memory mapping works by handing back to userspace a fake mmap offset
643  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
644  * up the object based on the offset and sets up the various memory mapping
645  * structures.
646  *
647  * This routine allocates and attaches a fake offset for @obj, in cases where
648  * the virtual size differs from the physical size (ie. &drm_gem_object.size).
649  * Otherwise just use drm_gem_create_mmap_offset().
650  *
651  * This function is idempotent and handles an already allocated mmap offset
652  * transparently. Drivers do not need to check for this case.
653  */
654 int
655 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
656 {
657 	struct drm_device *dev = obj->dev;
658 
659 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
660 				  size / PAGE_SIZE);
661 }
662 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
663 
664 /**
665  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
666  * @obj: obj in question
667  *
668  * GEM memory mapping works by handing back to userspace a fake mmap offset
669  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
670  * up the object based on the offset and sets up the various memory mapping
671  * structures.
672  *
673  * This routine allocates and attaches a fake offset for @obj.
674  *
675  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
676  * the fake offset again.
677  */
678 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
679 {
680 	return drm_gem_create_mmap_offset_size(obj, obj->size);
681 }
682 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
683 
684 #ifdef notyet
685 /*
686  * Move folios to appropriate lru and release the folios, decrementing the
687  * ref count of those folios.
688  */
689 static void drm_gem_check_release_batch(struct folio_batch *fbatch)
690 {
691 	check_move_unevictable_folios(fbatch);
692 	__folio_batch_release(fbatch);
693 	cond_resched();
694 }
695 #endif
696 
697 /**
698  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
699  * from shmem
700  * @obj: obj in question
701  *
702  * This reads the page-array of the shmem-backing storage of the given gem
703  * object. An array of pages is returned. If a page is not allocated or
704  * swapped-out, this will allocate/swap-in the required pages. Note that the
705  * whole object is covered by the page-array and pinned in memory.
706  *
707  * Use drm_gem_put_pages() to release the array and unpin all pages.
708  *
709  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
710  * If you require other GFP-masks, you have to do those allocations yourself.
711  *
712  * Note that you are not allowed to change gfp-zones during runtime. That is,
713  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
714  * set during initialization. If you have special zone constraints, set them
715  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
716  * to keep pages in the required zone during swap-in.
717  *
718  * This function is only valid on objects initialized with
719  * drm_gem_object_init(), but not for those initialized with
720  * drm_gem_private_object_init() only.
721  */
722 struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
723 {
724 	STUB();
725 	return ERR_PTR(-ENOSYS);
726 #ifdef notyet
727 	struct address_space *mapping;
728 	struct vm_page **pages;
729 	struct folio *folio;
730 	struct folio_batch fbatch;
731 	long i, j, npages;
732 
733 	if (WARN_ON(!obj->filp))
734 		return ERR_PTR(-EINVAL);
735 
736 	/* This is the shared memory object that backs the GEM resource */
737 	mapping = obj->filp->f_mapping;
738 
739 	/* We already BUG_ON() for non-page-aligned sizes in
740 	 * drm_gem_object_init(), so we should never hit this unless
741 	 * driver author is doing something really wrong:
742 	 */
743 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
744 
745 	npages = obj->size >> PAGE_SHIFT;
746 
747 	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
748 	if (pages == NULL)
749 		return ERR_PTR(-ENOMEM);
750 
751 	mapping_set_unevictable(mapping);
752 
753 	i = 0;
754 	while (i < npages) {
755 		long nr;
756 		folio = shmem_read_folio_gfp(mapping, i,
757 				mapping_gfp_mask(mapping));
758 		if (IS_ERR(folio))
759 			goto fail;
760 		nr = min(npages - i, folio_nr_pages(folio));
761 		for (j = 0; j < nr; j++, i++)
762 			pages[i] = folio_file_page(folio, i);
763 
764 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
765 		 * correct region during swapin. Note that this requires
766 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
767 		 * so shmem can relocate pages during swapin if required.
768 		 */
769 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
770 				(folio_pfn(folio) >= 0x00100000UL));
771 	}
772 
773 	return pages;
774 
775 fail:
776 	mapping_clear_unevictable(mapping);
777 	folio_batch_init(&fbatch);
778 	j = 0;
779 	while (j < i) {
780 		struct folio *f = page_folio(pages[j]);
781 		if (!folio_batch_add(&fbatch, f))
782 			drm_gem_check_release_batch(&fbatch);
783 		j += folio_nr_pages(f);
784 	}
785 	if (fbatch.nr)
786 		drm_gem_check_release_batch(&fbatch);
787 
788 	kvfree(pages);
789 	return ERR_CAST(folio);
790 #endif
791 }
792 EXPORT_SYMBOL(drm_gem_get_pages);
793 
794 /**
795  * drm_gem_put_pages - helper to free backing pages for a GEM object
796  * @obj: obj in question
797  * @pages: pages to free
798  * @dirty: if true, pages will be marked as dirty
799  * @accessed: if true, the pages will be marked as accessed
800  */
801 void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
802 		bool dirty, bool accessed)
803 {
804 	STUB();
805 #ifdef notyet
806 	int i, npages;
807 	struct address_space *mapping;
808 	struct folio_batch fbatch;
809 
810 	mapping = file_inode(obj->filp)->i_mapping;
811 	mapping_clear_unevictable(mapping);
812 
813 	/* We already BUG_ON() for non-page-aligned sizes in
814 	 * drm_gem_object_init(), so we should never hit this unless
815 	 * driver author is doing something really wrong:
816 	 */
817 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
818 
819 	npages = obj->size >> PAGE_SHIFT;
820 
821 	folio_batch_init(&fbatch);
822 	for (i = 0; i < npages; i++) {
823 		struct folio *folio;
824 
825 		if (!pages[i])
826 			continue;
827 		folio = page_folio(pages[i]);
828 
829 		if (dirty)
830 			folio_mark_dirty(folio);
831 
832 		if (accessed)
833 			folio_mark_accessed(folio);
834 
835 		/* Undo the reference we took when populating the table */
836 		if (!folio_batch_add(&fbatch, folio))
837 			drm_gem_check_release_batch(&fbatch);
838 		i += folio_nr_pages(folio) - 1;
839 	}
840 	if (folio_batch_count(&fbatch))
841 		drm_gem_check_release_batch(&fbatch);
842 
843 	kvfree(pages);
844 #endif
845 }
846 EXPORT_SYMBOL(drm_gem_put_pages);
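
/*
 * Intended pairing on platforms where these helpers are implemented (a
 * sketch; both are STUB()ed out above on OpenBSD):
 *
 *	struct vm_page **pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... bind the pages, e.g. build an sg table for the GPU ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */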
847 
848 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
849 			  struct drm_gem_object **objs)
850 {
851 	int i, ret = 0;
852 	struct drm_gem_object *obj;
853 
854 	spin_lock(&filp->table_lock);
855 
856 	for (i = 0; i < count; i++) {
857 		/* Check if we currently have a reference on the object */
858 		obj = idr_find(&filp->object_idr, handle[i]);
859 		if (!obj) {
860 			ret = -ENOENT;
861 			break;
862 		}
863 		drm_gem_object_get(obj);
864 		objs[i] = obj;
865 	}
866 	spin_unlock(&filp->table_lock);
867 
868 	return ret;
869 }
870 
871 /**
872  * drm_gem_objects_lookup - look up GEM objects from an array of handles
873  * @filp: DRM file private data
874  * @bo_handles: user pointer to array of userspace handles
875  * @count: size of handle array
876  * @objs_out: returned pointer to array of drm_gem_object pointers
877  *
878  * Takes an array of userspace handles and returns a newly allocated array of
879  * GEM objects.
880  *
881  * For a single handle lookup, use drm_gem_object_lookup().
882  *
883  * Returns:
884  *
885  * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
886  * released with drm_gem_object_put(). -ENOENT is returned on a lookup
887  * failure. 0 is returned on success.
888  *
889  */
890 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
891 			   int count, struct drm_gem_object ***objs_out)
892 {
893 	int ret;
894 	u32 *handles;
895 	struct drm_gem_object **objs;
896 
897 	if (!count)
898 		return 0;
899 
900 	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
901 			     GFP_KERNEL | __GFP_ZERO);
902 	if (!objs)
903 		return -ENOMEM;
904 
905 	*objs_out = objs;
906 
907 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
908 	if (!handles) {
909 		ret = -ENOMEM;
910 		goto out;
911 	}
912 
913 	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
914 		ret = -EFAULT;
915 		DRM_DEBUG("Failed to copy in GEM handles\n");
916 		goto out;
917 	}
918 
919 	ret = objects_lookup(filp, handles, count, objs);
920 out:
921 	kvfree(handles);
922 	return ret;
923 
924 }
925 EXPORT_SYMBOL(drm_gem_objects_lookup);
926 
927 /**
928  * drm_gem_object_lookup - look up a GEM object from its handle
929  * @filp: DRM file private data
930  * @handle: userspace handle
931  *
932  * Returns:
933  *
934  * A reference to the object named by the handle if such exists on @filp, NULL
935  * otherwise.
936  *
937  * If looking up an array of handles, use drm_gem_objects_lookup().
938  */
939 struct drm_gem_object *
940 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
941 {
942 	struct drm_gem_object *obj = NULL;
943 
944 	objects_lookup(filp, &handle, 1, &obj);
945 	return obj;
946 }
947 EXPORT_SYMBOL(drm_gem_object_lookup);
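
/*
 * Reference discipline for lookups (a sketch): every successful lookup
 * returns a reference that the caller must drop:
 *
 *	struct drm_gem_object *obj = drm_gem_object_lookup(file_priv, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... use obj ...
 *	drm_gem_object_put(obj);
 */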
948 
949 /**
950  * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
951  * shared and/or exclusive fences.
952  * @filep: DRM file private data
953  * @handle: userspace handle
954  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
955  * @timeout: timeout value in jiffies or zero to return immediately
956  *
957  * Returns:
958  *
959  * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out,
960  * or 0 on success.
961  */
962 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
963 				    bool wait_all, unsigned long timeout)
964 {
965 	long ret;
966 	struct drm_gem_object *obj;
967 
968 	obj = drm_gem_object_lookup(filep, handle);
969 	if (!obj) {
970 		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
971 		return -EINVAL;
972 	}
973 
974 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
975 				    true, timeout);
976 	if (ret == 0)
977 		ret = -ETIME;
978 	else if (ret > 0)
979 		ret = 0;
980 
981 	drm_gem_object_put(obj);
982 
983 	return ret;
984 }
985 EXPORT_SYMBOL(drm_gem_dma_resv_wait);
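
/*
 * Example caller (a sketch; the args layout is hypothetical):
 *
 *	long ret = drm_gem_dma_resv_wait(file_priv, args->handle, true,
 *	    drm_timeout_abs_to_jiffies(args->timeout_ns));
 *	return ret;	(0 on success, -ETIME if the fences never signaled)
 */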
986 
987 /**
988  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
989  * @dev: drm_device
990  * @data: ioctl data
991  * @file_priv: drm file-private structure
992  *
993  * Releases the handle to an mm object.
994  */
995 int
996 drm_gem_close_ioctl(struct drm_device *dev, void *data,
997 		    struct drm_file *file_priv)
998 {
999 	struct drm_gem_close *args = data;
1000 	int ret;
1001 
1002 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1003 		return -EOPNOTSUPP;
1004 
1005 	ret = drm_gem_handle_delete(file_priv, args->handle);
1006 
1007 	return ret;
1008 }
1009 
1010 /**
1011  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
1012  * @dev: drm_device
1013  * @data: ioctl data
1014  * @file_priv: drm file-private structure
1015  *
1016  * Create a global name for an object, returning the name.
1017  *
1018  * Note that the name does not hold a reference; when the object
1019  * is freed, the name goes away.
1020  */
1021 int
1022 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1023 		    struct drm_file *file_priv)
1024 {
1025 	struct drm_gem_flink *args = data;
1026 	struct drm_gem_object *obj;
1027 	int ret;
1028 
1029 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1030 		return -EOPNOTSUPP;
1031 
1032 	obj = drm_gem_object_lookup(file_priv, args->handle);
1033 	if (obj == NULL)
1034 		return -ENOENT;
1035 
1036 	mutex_lock(&dev->object_name_lock);
1037 	/* prevent races with concurrent gem_close. */
1038 	if (obj->handle_count == 0) {
1039 		ret = -ENOENT;
1040 		goto err;
1041 	}
1042 
1043 	if (!obj->name) {
1044 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
1045 		if (ret < 0)
1046 			goto err;
1047 
1048 		obj->name = ret;
1049 	}
1050 
1051 	args->name = (uint64_t) obj->name;
1052 	ret = 0;
1053 
1054 err:
1055 	mutex_unlock(&dev->object_name_lock);
1056 	drm_gem_object_put(obj);
1057 	return ret;
1058 }
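
/*
 * Sharing via flink between two processes (an illustrative userspace
 * sketch):
 *
 *	(exporter)
 *	struct drm_gem_flink flink = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	... send flink.name to the other process ...
 *
 *	(importer)
 *	struct drm_gem_open open_args = { .name = name };
 *	drmIoctl(fd2, DRM_IOCTL_GEM_OPEN, &open_args);
 *	... open_args.handle and open_args.size are now valid ...
 *
 * Note that flink names are global and unauthenticated; dma-buf file
 * descriptors are the preferred sharing mechanism.
 */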
1059 
1060 /**
1061  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
1062  * @dev: drm_device
1063  * @data: ioctl data
1064  * @file_priv: drm file-private structure
1065  *
1066  * Open an object using the global name, returning a handle and the size.
1067  *
1068  * This handle (of course) holds a reference to the object, so the object
1069  * will not go away until the handle is deleted.
1070  */
1071 int
1072 drm_gem_open_ioctl(struct drm_device *dev, void *data,
1073 		   struct drm_file *file_priv)
1074 {
1075 	struct drm_gem_open *args = data;
1076 	struct drm_gem_object *obj;
1077 	int ret;
1078 	u32 handle;
1079 
1080 	if (!drm_core_check_feature(dev, DRIVER_GEM))
1081 		return -EOPNOTSUPP;
1082 
1083 	mutex_lock(&dev->object_name_lock);
1084 	obj = idr_find(&dev->object_name_idr, (int) args->name);
1085 	if (obj) {
1086 		drm_gem_object_get(obj);
1087 	} else {
1088 		mutex_unlock(&dev->object_name_lock);
1089 		return -ENOENT;
1090 	}
1091 
1092 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1093 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1094 	if (ret)
1095 		goto err;
1096 
1097 	args->handle = handle;
1098 	args->size = obj->size;
1099 
1100 err:
1101 	drm_gem_object_put(obj);
1102 	return ret;
1103 }
1104 
1105 /**
1106  * drm_gem_open - initializes GEM file-private structures at devnode open time
1107  * @dev: drm_device which is being opened by userspace
1108  * @file_private: drm file-private structure to set up
1109  *
1110  * Called at device open time, sets up the structure for handling refcounting
1111  * of mm objects.
1112  */
1113 void
1114 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1115 {
1116 	idr_init_base(&file_private->object_idr, 1);
1117 	mtx_init(&file_private->table_lock, IPL_NONE);
1118 }
1119 
1120 /**
1121  * drm_gem_release - release file-private GEM resources
1122  * @dev: drm_device which is being closed by userspace
1123  * @file_private: drm file-private structure to clean up
1124  *
1125  * Called at close time when the filp is going away.
1126  *
1127  * Releases any remaining references on objects by this filp.
1128  */
1129 void
1130 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1131 {
1132 	idr_for_each(&file_private->object_idr,
1133 		     &drm_gem_object_release_handle, file_private);
1134 	idr_destroy(&file_private->object_idr);
1135 }
1136 
1137 /**
1138  * drm_gem_object_release - release GEM buffer object resources
1139  * @obj: GEM buffer object
1140  *
1141  * This releases any structures and resources used by @obj and is the inverse of
1142  * drm_gem_object_init().
1143  */
1144 void
1145 drm_gem_object_release(struct drm_gem_object *obj)
1146 {
1147 #ifdef __linux__
1148 	if (obj->filp)
1149 		fput(obj->filp);
1150 #else
1151 	if (obj->uao)
1152 		uao_detach(obj->uao);
1153 	if (obj->uobj.pgops)
1154 		uvm_obj_destroy(&obj->uobj);
1155 #endif
1156 
1157 	drm_gem_private_object_fini(obj);
1158 
1159 	drm_gem_free_mmap_offset(obj);
1160 	drm_gem_lru_remove(obj);
1161 }
1162 EXPORT_SYMBOL(drm_gem_object_release);
1163 
1164 /**
1165  * drm_gem_object_free - free a GEM object
1166  * @kref: kref of the object to free
1167  *
1168  * Called after the last reference to the object has been lost.
1169  *
1170  * Frees the object
1171  */
1172 void
1173 drm_gem_object_free(struct kref *kref)
1174 {
1175 	struct drm_gem_object *obj =
1176 		container_of(kref, struct drm_gem_object, refcount);
1177 
1178 	if (WARN_ON(!obj->funcs->free))
1179 		return;
1180 
1181 	obj->funcs->free(obj);
1182 }
1183 EXPORT_SYMBOL(drm_gem_object_free);
1184 
1185 #ifdef __linux__
1186 /**
1187  * drm_gem_vm_open - vma->ops->open implementation for GEM
1188  * @vma: VM area structure
1189  *
1190  * This function implements the #vm_operations_struct open() callback for GEM
1191  * drivers. This must be used together with drm_gem_vm_close().
1192  */
1193 void drm_gem_vm_open(struct vm_area_struct *vma)
1194 {
1195 	struct drm_gem_object *obj = vma->vm_private_data;
1196 
1197 	drm_gem_object_get(obj);
1198 }
1199 EXPORT_SYMBOL(drm_gem_vm_open);
1200 
1201 /**
1202  * drm_gem_vm_close - vma->ops->close implementation for GEM
1203  * @vma: VM area structure
1204  *
1205  * This function implements the #vm_operations_struct close() callback for GEM
1206  * drivers. This must be used together with drm_gem_vm_open().
1207  */
1208 void drm_gem_vm_close(struct vm_area_struct *vma)
1209 {
1210 	struct drm_gem_object *obj = vma->vm_private_data;
1211 
1212 	drm_gem_object_put(obj);
1213 }
1214 EXPORT_SYMBOL(drm_gem_vm_close);
1215 
1216 /**
1217  * drm_gem_mmap_obj - memory map a GEM object
1218  * @obj: the GEM object to map
1219  * @obj_size: the object size to be mapped, in bytes
1220  * @vma: VMA for the area to be mapped
1221  *
1222  * Set up the VMA to prepare mapping of the GEM object using the GEM object's
1223  * vm_ops. Depending on their requirements, GEM objects can either
1224  * provide a fault handler in their vm_ops (in which case any accesses to
1225  * the object will be trapped, to perform migration, GTT binding, surface
1226  * register allocation, or performance monitoring), or mmap the buffer memory
1227  * synchronously after calling drm_gem_mmap_obj.
1228  *
1229  * This function is mainly intended to implement the DMABUF mmap operation, when
1230  * the GEM object is not looked up based on its fake offset. To implement the
1231  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1232  *
1233  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1234  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1235  * callers must verify access restrictions before calling this helper.
1236  *
1237  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1238  * size, or if no vm_ops are provided.
1239  */
1240 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1241 		     struct vm_area_struct *vma)
1242 {
1243 	int ret;
1244 
1245 	/* Check for valid size. */
1246 	if (obj_size < vma->vm_end - vma->vm_start)
1247 		return -EINVAL;
1248 
1249 	/* Take a ref for this mapping of the object, so that the fault
1250 	 * handler can dereference the mmap offset's pointer to the object.
1251 	 * This reference is cleaned up by the corresponding vm_close
1252 	 * (which should happen whether the vma was created by this call, or
1253 	 * by a vm_open due to mremap or partial unmap or whatever).
1254 	 */
1255 	drm_gem_object_get(obj);
1256 
1257 	vma->vm_private_data = obj;
1258 	vma->vm_ops = obj->funcs->vm_ops;
1259 
1260 	if (obj->funcs->mmap) {
1261 		ret = obj->funcs->mmap(obj, vma);
1262 		if (ret)
1263 			goto err_drm_gem_object_put;
1264 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1265 	} else {
1266 		if (!vma->vm_ops) {
1267 			ret = -EINVAL;
1268 			goto err_drm_gem_object_put;
1269 		}
1270 
1271 		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1272 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1273 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1274 	}
1275 
1276 	return 0;
1277 
1278 err_drm_gem_object_put:
1279 	drm_gem_object_put(obj);
1280 	return ret;
1281 }
1282 EXPORT_SYMBOL(drm_gem_mmap_obj);
1283 
1284 /**
1285  * drm_gem_mmap - memory map routine for GEM objects
1286  * @filp: DRM file pointer
1287  * @vma: VMA for the area to be mapped
1288  *
1289  * If a driver supports GEM object mapping, mmap calls on the DRM file
1290  * descriptor will end up here.
1291  *
1292  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1293  * contain the fake offset we created when the GTT map ioctl was called on
1294  * the object) and map it with a call to drm_gem_mmap_obj().
1295  *
1296  * If the caller is not granted access to the buffer object, the mmap will fail
1297  * with EACCES. Please see the vma manager for more information.
1298  */
1299 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1300 {
1301 	struct drm_file *priv = filp->private_data;
1302 	struct drm_device *dev = priv->minor->dev;
1303 	struct drm_gem_object *obj = NULL;
1304 	struct drm_vma_offset_node *node;
1305 	int ret;
1306 
1307 	if (drm_dev_is_unplugged(dev))
1308 		return -ENODEV;
1309 
1310 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1311 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1312 						  vma->vm_pgoff,
1313 						  vma_pages(vma));
1314 	if (likely(node)) {
1315 		obj = container_of(node, struct drm_gem_object, vma_node);
1316 		/*
1317 		 * When the object is being freed, after it hits 0-refcnt it
1318 		 * proceeds to tear down the object. In the process it will
1319 		 * attempt to remove the VMA offset and so acquire this
1320 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1321 		 * that matches our range, we know it is in the process of being
1322 		 * destroyed and will be freed as soon as we release the lock -
1323 		 * so we have to check for the 0-refcnted object and treat it as
1324 		 * invalid.
1325 		 */
1326 		if (!kref_get_unless_zero(&obj->refcount))
1327 			obj = NULL;
1328 	}
1329 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1330 
1331 	if (!obj)
1332 		return -EINVAL;
1333 
1334 	if (!drm_vma_node_is_allowed(node, priv)) {
1335 		drm_gem_object_put(obj);
1336 		return -EACCES;
1337 	}
1338 
1339 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1340 			       vma);
1341 
1342 	drm_gem_object_put(obj);
1343 
1344 	return ret;
1345 }
1346 EXPORT_SYMBOL(drm_gem_mmap);
1347 #else /* ! __linux__ */
1348 
1349 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1350 		     vm_prot_t accessprot, voff_t off, vsize_t size)
1351 {
1352 	int ret;
1353 
1354 	/* Check for valid size. */
1355 	if (obj_size < size)
1356 		return -EINVAL;
1357 
1358 	/* Take a ref for this mapping of the object, so that the fault
1359 	 * handler can dereference the mmap offset's pointer to the object.
1360 	 * This reference is cleaned up by the corresponding vm_close
1361 	 * (which should happen whether the vma was created by this call, or
1362 	 * by a vm_open due to mremap or partial unmap or whatever).
1363 	 */
1364 	drm_gem_object_get(obj);
1365 
1366 #ifdef __linux__
1367 	vma->vm_private_data = obj;
1368 	vma->vm_ops = obj->funcs->vm_ops;
1369 #else
1370 	if (obj->uobj.pgops == NULL)
1371 		uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
1372 #endif
1373 
1374 	if (obj->funcs->mmap) {
1375 		ret = obj->funcs->mmap(obj, accessprot, off, size);
1376 		if (ret)
1377 			goto err_drm_gem_object_put;
1378 #ifdef notyet
1379 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1380 #endif
1381 	} else {
1382 #ifdef notyet
1383 		if (!vma->vm_ops) {
1384 			ret = -EINVAL;
1385 			goto err_drm_gem_object_put;
1386 		}
1387 
1388 		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1389 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1390 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1391 #else
1392 		ret = -EINVAL;
1393 		goto err_drm_gem_object_put;
1394 #endif
1395 	}
1396 
1397 	return 0;
1398 
1399 err_drm_gem_object_put:
1400 	drm_gem_object_put(obj);
1401 	return ret;
1402 }
1403 
1404 struct uvm_object *
1405 drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
1406     vsize_t size)
1407 {
1408 	struct drm_file *priv = (void *)filp;
1409 	struct drm_device *dev = priv->minor->dev;
1410 	struct drm_gem_object *obj = NULL;
1411 	struct drm_vma_offset_node *node;
1412 	int ret;
1413 
1414 	if (drm_dev_is_unplugged(dev))
1415 		return NULL;
1416 
1417 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1418 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1419 						  off >> PAGE_SHIFT,
1420 						  atop(round_page(size)));
1421 	if (likely(node)) {
1422 		obj = container_of(node, struct drm_gem_object, vma_node);
1423 		/*
1424 		 * When the object is being freed, after it hits 0-refcnt it
1425 		 * proceeds to tear down the object. In the process it will
1426 		 * attempt to remove the VMA offset and so acquire this
1427 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1428 		 * that matches our range, we know it is in the process of being
1429 		 * destroyed and will be freed as soon as we release the lock -
1430 		 * so we have to check for the 0-refcnted object and treat it as
1431 		 * invalid.
1432 		 */
1433 		if (!kref_get_unless_zero(&obj->refcount))
1434 			obj = NULL;
1435 	}
1436 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1437 
1438 	if (!obj)
1439 		return NULL;
1440 
1441 	if (!drm_vma_node_is_allowed(node, priv)) {
1442 		drm_gem_object_put(obj);
1443 		return NULL;
1444 	}
1445 
1446 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1447 			       accessprot, off, size);
1448 
1449 	drm_gem_object_put(obj);
1450 
1451 	return &obj->uobj;
1452 }
1453 
1454 #endif /* __linux__ */
1455 
1456 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1457 			const struct drm_gem_object *obj)
1458 {
1459 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1460 	drm_printf_indent(p, indent, "refcount=%u\n",
1461 			  kref_read(&obj->refcount));
1462 	drm_printf_indent(p, indent, "start=%08lx\n",
1463 			  drm_vma_node_start(&obj->vma_node));
1464 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1465 	drm_printf_indent(p, indent, "imported=%s\n",
1466 			  str_yes_no(obj->import_attach));
1467 
1468 	if (obj->funcs->print_info)
1469 		obj->funcs->print_info(p, indent, obj);
1470 }
1471 
1472 int drm_gem_pin(struct drm_gem_object *obj)
1473 {
1474 	if (obj->funcs->pin)
1475 		return obj->funcs->pin(obj);
1476 
1477 	return 0;
1478 }
1479 
1480 void drm_gem_unpin(struct drm_gem_object *obj)
1481 {
1482 	if (obj->funcs->unpin)
1483 		obj->funcs->unpin(obj);
1484 }
1485 
1486 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
1487 {
1488 	int ret;
1489 
1490 	dma_resv_assert_held(obj->resv);
1491 
1492 	if (!obj->funcs->vmap)
1493 		return -EOPNOTSUPP;
1494 
1495 	ret = obj->funcs->vmap(obj, map);
1496 	if (ret)
1497 		return ret;
1498 	else if (iosys_map_is_null(map))
1499 		return -ENOMEM;
1500 
1501 	return 0;
1502 }
1503 EXPORT_SYMBOL(drm_gem_vmap);
1504 
1505 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
1506 {
1507 	dma_resv_assert_held(obj->resv);
1508 
1509 	if (iosys_map_is_null(map))
1510 		return;
1511 
1512 	if (obj->funcs->vunmap)
1513 		obj->funcs->vunmap(obj, map);
1514 
1515 	/* Always set the mapping to NULL. Callers may rely on this. */
1516 	iosys_map_clear(map);
1517 }
1518 EXPORT_SYMBOL(drm_gem_vunmap);
1519 
1520 int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
1521 {
1522 	int ret;
1523 
1524 	dma_resv_lock(obj->resv, NULL);
1525 	ret = drm_gem_vmap(obj, map);
1526 	dma_resv_unlock(obj->resv);
1527 
1528 	return ret;
1529 }
1530 EXPORT_SYMBOL(drm_gem_vmap_unlocked);
1531 
1532 void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
1533 {
1534 	dma_resv_lock(obj->resv, NULL);
1535 	drm_gem_vunmap(obj, map);
1536 	dma_resv_unlock(obj->resv);
1537 }
1538 EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
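
/*
 * Typical pairing (a sketch): the _unlocked helpers take the reservation
 * lock around the map/unmap, so a CPU access looks like:
 *
 *	struct iosys_map map;
 *	int err = drm_gem_vmap_unlocked(obj, &map);
 *	if (err)
 *		return err;
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *	drm_gem_vunmap_unlocked(obj, &map);
 */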
1539 
1540 /**
1541  * drm_gem_lock_reservations - Sets up the ww context and acquires
1542  * the lock on an array of GEM objects.
1543  *
1544  * Once you've locked your reservations, you'll want to set up space
1545  * for your shared fences (if applicable), submit your job, then
1546  * drm_gem_unlock_reservations().
1547  *
1548  * @objs: drm_gem_objects to lock
1549  * @count: Number of objects in @objs
1550  * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1551  * part of tracking this set of locked reservations.
1552  */
1553 int
1554 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1555 			  struct ww_acquire_ctx *acquire_ctx)
1556 {
1557 	int contended = -1;
1558 	int i, ret;
1559 
1560 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
1561 
1562 retry:
1563 	if (contended != -1) {
1564 		struct drm_gem_object *obj = objs[contended];
1565 
1566 		ret = dma_resv_lock_slow_interruptible(obj->resv,
1567 								 acquire_ctx);
1568 		if (ret) {
1569 			ww_acquire_fini(acquire_ctx);
1570 			return ret;
1571 		}
1572 	}
1573 
1574 	for (i = 0; i < count; i++) {
1575 		if (i == contended)
1576 			continue;
1577 
1578 		ret = dma_resv_lock_interruptible(objs[i]->resv,
1579 							    acquire_ctx);
1580 		if (ret) {
1581 			int j;
1582 
1583 			for (j = 0; j < i; j++)
1584 				dma_resv_unlock(objs[j]->resv);
1585 
1586 			if (contended != -1 && contended >= i)
1587 				dma_resv_unlock(objs[contended]->resv);
1588 
1589 			if (ret == -EDEADLK) {
1590 				contended = i;
1591 				goto retry;
1592 			}
1593 
1594 			ww_acquire_fini(acquire_ctx);
1595 			return ret;
1596 		}
1597 	}
1598 
1599 	ww_acquire_done(acquire_ctx);
1600 
1601 	return 0;
1602 }
1603 EXPORT_SYMBOL(drm_gem_lock_reservations);
1604 
1605 void
1606 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1607 			    struct ww_acquire_ctx *acquire_ctx)
1608 {
1609 	int i;
1610 
1611 	for (i = 0; i < count; i++)
1612 		dma_resv_unlock(objs[i]->resv);
1613 
1614 	ww_acquire_fini(acquire_ctx);
1615 }
1616 EXPORT_SYMBOL(drm_gem_unlock_reservations);
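
/*
 * Usage around job submission, per the drm_gem_lock_reservations()
 * comment above (a sketch):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret = drm_gem_lock_reservations(objs, nr, &ctx);
 *	if (ret)
 *		return ret;
 *	... reserve fence slots, attach fences, queue the job ...
 *	drm_gem_unlock_reservations(objs, nr, &ctx);
 */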
1617 
1618 /**
1619  * drm_gem_lru_init - initialize a LRU
1620  *
1621  * @lru: The LRU to initialize
1622  * @lock: The lock protecting the LRU
1623  */
1624 void
1625 drm_gem_lru_init(struct drm_gem_lru *lru, struct rwlock *lock)
1626 {
1627 	lru->lock = lock;
1628 	lru->count = 0;
1629 	INIT_LIST_HEAD(&lru->list);
1630 }
1631 EXPORT_SYMBOL(drm_gem_lru_init);
1632 
1633 static void
1634 drm_gem_lru_remove_locked(struct drm_gem_object *obj)
1635 {
1636 	obj->lru->count -= obj->size >> PAGE_SHIFT;
1637 	WARN_ON(obj->lru->count < 0);
1638 	list_del(&obj->lru_node);
1639 	obj->lru = NULL;
1640 }
1641 
1642 /**
1643  * drm_gem_lru_remove - remove object from whatever LRU it is in
1644  *
1645  * If the object is currently in any LRU, remove it.
1646  *
1647  * @obj: The GEM object to remove from current LRU
1648  */
1649 void
1650 drm_gem_lru_remove(struct drm_gem_object *obj)
1651 {
1652 	struct drm_gem_lru *lru = obj->lru;
1653 
1654 	if (!lru)
1655 		return;
1656 
1657 	mutex_lock(lru->lock);
1658 	drm_gem_lru_remove_locked(obj);
1659 	mutex_unlock(lru->lock);
1660 }
1661 EXPORT_SYMBOL(drm_gem_lru_remove);
1662 
1663 /**
1664  * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
1665  *
1666  * Like &drm_gem_lru_move_tail but the lru lock must be held
1667  *
1668  * @lru: The LRU to move the object into.
1669  * @obj: The GEM object to move into this LRU
1670  */
1671 void
1672 drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1673 {
1674 	lockdep_assert_held_once(lru->lock);
1675 
1676 	if (obj->lru)
1677 		drm_gem_lru_remove_locked(obj);
1678 
1679 	lru->count += obj->size >> PAGE_SHIFT;
1680 	list_add_tail(&obj->lru_node, &lru->list);
1681 	obj->lru = lru;
1682 }
1683 EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);
1684 
1685 /**
1686  * drm_gem_lru_move_tail - move the object to the tail of the LRU
1687  *
1688  * If the object is already in this LRU it will be moved to the
1689  * tail.  Otherwise it will be removed from whichever other LRU
1690  * it is in (if any) and moved into this LRU.
1691  *
1692  * @lru: The LRU to move the object into.
1693  * @obj: The GEM object to move into this LRU
1694  */
1695 void
1696 drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1697 {
1698 	mutex_lock(lru->lock);
1699 	drm_gem_lru_move_tail_locked(lru, obj);
1700 	mutex_unlock(lru->lock);
1701 }
1702 EXPORT_SYMBOL(drm_gem_lru_move_tail);
1703 
1704 /**
1705  * drm_gem_lru_scan - helper to implement shrinker.scan_objects
1706  *
1707  * If the shrink callback succeeds, it is expected that the driver
1708  * move the object out of this LRU.
1709  *
1710  * If the LRU possibly contains active buffers, it is the responsibility
1711  * of the shrink callback to check for this (ie. dma_resv_test_signaled())
1712  * or if necessary block until the buffer becomes idle.
1713  *
1714  * @lru: The LRU to scan
1715  * @nr_to_scan: The number of pages to try to reclaim
1716  * @remaining: The number of pages left to reclaim, should be initialized by caller
1717  * @shrink: Callback to try to shrink/reclaim the object.
1718  */
1719 unsigned long
1720 drm_gem_lru_scan(struct drm_gem_lru *lru,
1721 		 unsigned int nr_to_scan,
1722 		 unsigned long *remaining,
1723 		 bool (*shrink)(struct drm_gem_object *obj))
1724 {
1725 	struct drm_gem_lru still_in_lru;
1726 	struct drm_gem_object *obj;
1727 	unsigned freed = 0;
1728 
1729 	drm_gem_lru_init(&still_in_lru, lru->lock);
1730 
1731 	mutex_lock(lru->lock);
1732 
1733 	while (freed < nr_to_scan) {
1734 		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
1735 
1736 		if (!obj)
1737 			break;
1738 
1739 		drm_gem_lru_move_tail_locked(&still_in_lru, obj);
1740 
1741 		/*
1742 		 * If it's in the process of being freed, gem_object->free()
1743 		 * may be blocked on the lock waiting to remove it.  So just
1744 		 * skip it.
1745 		 */
1746 		if (!kref_get_unless_zero(&obj->refcount))
1747 			continue;
1748 
1749 		/*
1750 		 * Now that we own a reference, we can drop the lock for the
1751 		 * rest of the loop body, to reduce contention with other
1752 		 * code paths that need the LRU lock
1753 		 */
1754 		mutex_unlock(lru->lock);
1755 
1756 		/*
1757 		 * Note that this still needs to be a trylock, since we can
1758 		 * hit the shrinker in response to trying to get backing pages
1759 		 * for this obj (ie. while its lock is already held)
1760 		 */
1761 		if (!dma_resv_trylock(obj->resv)) {
1762 			*remaining += obj->size >> PAGE_SHIFT;
1763 			goto tail;
1764 		}
1765 
1766 		if (shrink(obj)) {
1767 			freed += obj->size >> PAGE_SHIFT;
1768 
1769 			/*
1770 			 * If we succeeded in releasing the object's backing
1771 			 * pages, we expect the driver to have moved the object
1772 			 * out of this LRU
1773 			 */
1774 			WARN_ON(obj->lru == &still_in_lru);
1775 			WARN_ON(obj->lru == lru);
1776 		}
1777 
1778 		dma_resv_unlock(obj->resv);
1779 
1780 tail:
1781 		drm_gem_object_put(obj);
1782 		mutex_lock(lru->lock);
1783 	}
1784 
1785 	/*
1786 	 * Move objects we've skipped over out of the temporary still_in_lru
1787 	 * back into this LRU
1788 	 */
1789 	list_for_each_entry (obj, &still_in_lru.list, lru_node)
1790 		obj->lru = lru;
1791 	list_splice_tail(&still_in_lru.list, &lru->list);
1792 	lru->count += still_in_lru.count;
1793 
1794 	mutex_unlock(lru->lock);
1795 
1796 	return freed;
1797 }
1798 EXPORT_SYMBOL(drm_gem_lru_scan);
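
/*
 * Sketch of a shrinker built on drm_gem_lru_scan() (the purge callback
 * and priv/lru names are hypothetical, modeled on how a driver might
 * wire up scan_objects):
 *
 *	static bool
 *	purge(struct drm_gem_object *obj)
 *	{
 *		if (!can_swap_out(obj))
 *			return false;
 *		release_backing_pages(obj);	(also moves obj off this LRU)
 *		return true;
 *	}
 *
 *	freed = drm_gem_lru_scan(&priv->lru, nr_to_scan, &remaining, purge);
 */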
1799 
1800 /**
1801  * drm_gem_evict - helper to evict backing pages for a GEM object
1802  * @obj: obj in question
1803  */
1804 int drm_gem_evict(struct drm_gem_object *obj)
1805 {
1806 	dma_resv_assert_held(obj->resv);
1807 
1808 	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
1809 		return -EBUSY;
1810 
1811 	if (obj->funcs->evict)
1812 		return obj->funcs->evict(obj);
1813 
1814 	return 0;
1815 }
1816 EXPORT_SYMBOL(drm_gem_evict);
1817