xref: /dragonfly/sys/dev/drm/drm_file.c (revision dc6f5bdf)
1 /*
2  * \author Rickard E. (Rik) Faith <faith@valinux.com>
3  * \author Daryll Strauss <daryll@valinux.com>
4  * \author Gareth Hughes <gareth@valinux.com>
5  */
6 
7 /*
8  * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
9  *
10  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12  * All Rights Reserved.
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a
15  * copy of this software and associated documentation files (the "Software"),
16  * to deal in the Software without restriction, including without limitation
17  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18  * and/or sell copies of the Software, and to permit persons to whom the
19  * Software is furnished to do so, subject to the following conditions:
20  *
21  * The above copyright notice and this permission notice (including the next
22  * paragraph) shall be included in all copies or substantial portions of the
23  * Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
28  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31  * OTHER DEALINGS IN THE SOFTWARE.
32  */
33 
34 #include <sys/types.h>
35 #include <sys/uio.h>	/* must come first to avoid kfree() macros issues */
36 
37 #include <linux/poll.h>
38 #include <linux/slab.h>
39 #include <linux/module.h>
40 
41 #include <drm/drm_file.h>
42 #include <drm/drmP.h>
43 
44 #include "drm_legacy.h"
45 #include "drm_internal.h"
46 #include "drm_crtc_internal.h"
47 
48 #include <sys/devfs.h>
49 
50 /* from BKL pushdown */
51 DEFINE_MUTEX(drm_global_mutex);
52 
53 /**
54  * DOC: file operations
55  *
56  * Drivers must define the file operations structure that forms the DRM
57  * userspace API entry point, even though most of those operations are
58  * implemented in the DRM core. The mandatory functions are drm_open(),
59  * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
60  * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
61  * implement private ioctls that require 32/64 bit compatibility support must
62  * provide their own .compat_ioctl() handler that processes private ioctls and
63  * calls drm_compat_ioctl() for core ioctls.
64  *
65  * In addition drm_read() and drm_poll() provide support for DRM events. DRM
66  * events are a generic and extensible means to send asynchronous events to
67  * userspace through the file descriptor. They are used to send vblank event and
68  * page flip completions by the KMS API. But drivers can also use it for their
69  * own needs, e.g. to signal completion of rendering.
70  *
71  * The memory mapping implementation will vary depending on how the driver
72  * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
73  * function, modern drivers should use one of the provided memory-manager
74  * specific implementations. For GEM-based drivers this is drm_gem_mmap().
75  *
76  * No other file operations are supported by the DRM userspace API. Overall the
77  * following is an example #file_operations structure::
78  *
79  *     static const example_drm_fops = {
80  *             .owner = THIS_MODULE,
81  *             .open = drm_open,
82  *             .release = drm_release,
83  *             .unlocked_ioctl = drm_ioctl,
84  *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
85  *             .poll = drm_poll,
86  *             .read = drm_read,
87  *             .llseek = no_llseek,
88  *             .mmap = drm_gem_mmap,
89  *     };
90  */
91 
92 extern devclass_t drm_devclass;
93 
94 static int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
95 		    struct drm_device *dev, struct file *filp, struct drm_minor *minor);
96 
97 static int drm_setup(struct drm_device * dev)
98 {
99 	int ret;
100 
101 	if (dev->driver->firstopen &&
102 	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
103 		ret = dev->driver->firstopen(dev);
104 		if (ret != 0)
105 			return ret;
106 	}
107 
108 	dev->buf_use = 0;
109 
110 	ret = drm_legacy_dma_setup(dev);
111 	if (ret < 0)
112 		return ret;
113 
114 	init_waitqueue_head(&dev->lock.lock_queue);
115 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
116 		dev->irq_enabled = 0;
117 	dev->context_flag = 0;
118 	dev->last_context = 0;
119 	dev->if_version = 0;
120 
121 	dev->buf_sigio = NULL;
122 
123 
124 	DRM_DEBUG("\n");
125 	return 0;
126 }
127 
/* Look up the per-unit softc registered with the drm devclass. */
#define DRIVER_SOFTC(unit) \
	((struct drm_softc*)devclass_get_softc(drm_devclass, unit))

/* Return the unit (minor) number encoded in a character device. */
static inline int dev_minor(cdev_t x)
{
	return minor(x);
}
135 
136 /**
137  * drm_open - open method for DRM file
138  * @inode: device inode
139  * @filp: file pointer.
140  *
141  * This function must be used by drivers as their .open() #file_operations
142  * method. It looks up the correct DRM device and instantiates all the per-file
143  * resources for it.
144  *
145  * RETURNS:
146  *
147  * 0 on success or negative errno value on failure.
148  */
149 // drm_open() is a file_operations function, not a dev_ops function
150 // int drm_open(struct inode *inode, struct file *filp)
151 int drm_open(struct dev_open_args *ap)
152 {
153 	struct file *filp = ap->a_fp;
154 	struct inode *inode = filp->f_data;	/* A Linux inode is a Unix vnode */
155 	struct drm_device *dev;
156 	struct drm_minor *minor;
157 	struct cdev *kdev = ap->a_head.a_dev;
158 	int flags = ap->a_oflags;
159 	int fmt = 0;
160 	struct thread *p = curthread;
161 	int retcode;
162 	int need_setup = 0;
163 #ifdef __DragonFly__
164 	struct drm_softc *softc = DRIVER_SOFTC(dev_minor(kdev));
165 
166 	dev = softc->drm_driver_data;
167 #endif
168 
169 	minor = drm_minor_acquire(iminor(inode));
170 	if (dev == NULL)
171 		return (ENXIO);
172 	if (!dev->open_count++)
173 		need_setup = 1;
174 
175 	/* share address_space across all char-devs of a single device */
176 #if 0
177 	filp->f_mapping = dev->anon_inode->i_mapping;
178 #endif
179 
180 	retcode = drm_open_helper(kdev, flags, fmt, p, dev, ap->a_fp, minor);
181 	if (retcode == 0) {
182 		DRM_LOCK(dev);
183 		device_busy(dev->dev->bsddev);
184 		DRM_UNLOCK(dev);
185 	}
186 	if (need_setup) {
187 		retcode = drm_setup(dev);
188 		if (retcode)
189 			goto err_undo;
190 	}
191 	return 0;
192 
193 err_undo:
194 	dev->open_count--;
195 	drm_minor_release(minor);
196 	return retcode;
197 }
198 EXPORT_SYMBOL(drm_open);
199 
200 /*
201  * Check whether DRI will run on this CPU.
202  *
203  * \return non-zero if the DRI will run on this CPU, or zero otherwise.
204  */
205 
206 /*
207  * Called whenever a process opens /dev/drm.
208  *
209  * \param filp file pointer.
210  * \param minor acquired minor-object.
211  * \return zero on success or a negative number on failure.
212  *
213  * Creates and initializes a drm_file structure for the file private data in \p
214  * filp and add it into the double linked list in \p dev.
215  */
216 static int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
217 		    struct drm_device *dev, struct file *filp, struct drm_minor *minor)
218 {
219 	struct drm_file *priv;
220 	int ret;
221 
222 	if (flags & O_EXCL)
223 		return EBUSY; /* No exclusive opens */
224 
225 	DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));
226 
227 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
228 	if (!priv)
229 		return -ENOMEM;
230 
231 	filp->private_data = priv;
232 	priv->filp = filp;
233 	priv->pid = p->td_proc->p_pid;
234 	priv->minor = minor;
235 	priv->dev		= dev;
236 
237 	/* for compatibility root is always authenticated */
238 	priv->authenticated = capable(CAP_SYS_ADMIN);
239 	priv->lock_count = 0;
240 
241 	INIT_LIST_HEAD(&priv->lhead);
242 	INIT_LIST_HEAD(&priv->fbs);
243 	lockinit(&priv->fbs_lock, "dpfl", 0, LK_CANRECURSE);
244 	INIT_LIST_HEAD(&priv->blobs);
245 	INIT_LIST_HEAD(&priv->pending_event_list);
246 	INIT_LIST_HEAD(&priv->event_list);
247 	init_waitqueue_head(&priv->event_wait);
248 	priv->event_space = 4096; /* set aside 4k for event buffer */
249 
250 	lockinit(&priv->event_read_lock, "dperl", 0, LK_CANRECURSE);
251 
252 	if (drm_core_check_feature(dev, DRIVER_GEM))
253 		drm_gem_open(dev, priv);
254 
255 	if (dev->driver->open) {
256 		/* shared code returns -errno */
257 		ret = -dev->driver->open(dev, priv);
258 		if (ret != 0) {
259 			kfree(priv);
260 			return ret;
261 		}
262 	}
263 
264 #ifdef __DragonFly__
265 	/* first opener automatically becomes master */
266 	mutex_lock(&dev->master_mutex);
267 	priv->is_master = list_empty(&dev->filelist);
268 	mutex_unlock(&dev->master_mutex);
269 #endif
270 
271 	mutex_lock(&dev->filelist_mutex);
272 	list_add(&priv->lhead, &dev->filelist);
273 	mutex_unlock(&dev->filelist_mutex);
274 
275 	kdev->si_drv1 = dev;
276 	ret = devfs_set_cdevpriv(filp, priv, &drm_cdevpriv_dtor);
277 	if (ret != 0)
278 		drm_cdevpriv_dtor(priv);
279 
280 	return ret;
281 
282 #if 0
283 out_close:
284 #endif
285 	if (dev->driver->postclose)
286 		dev->driver->postclose(dev, priv);
287 	if (drm_core_check_feature(dev, DRIVER_GEM))
288 		drm_gem_release(dev, priv);
289 	put_pid(priv->pid);
290 	kfree(priv);
291 	filp->private_data = NULL;
292 	return ret;
293 }
294 
295 static void drm_events_release(struct drm_file *file_priv)
296 {
297 	struct drm_device *dev = file_priv->minor->dev;
298 	struct drm_pending_event *e, *et;
299 	unsigned long flags;
300 
301 	spin_lock_irqsave(&dev->event_lock, flags);
302 
303 	/* Unlink pending events */
304 	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
305 				 pending_link) {
306 		list_del(&e->pending_link);
307 		e->file_priv = NULL;
308 	}
309 
310 	/* Remove unconsumed events */
311 	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
312 		list_del(&e->link);
313 		kfree(e);
314 	}
315 
316 	spin_unlock_irqrestore(&dev->event_lock, flags);
317 }
318 
/*
 * drm_legacy_dev_reinit
 *
 * Reinitializes a legacy/ums drm device in its lastclose function.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	/* Tear down the interrupt handler first, outside struct_mutex. */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	/* Release AGP, scatter/gather and DMA resources, in that order. */
	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
#if 0
	drm_legacy_vma_flush(dev);
#endif
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	dev->sigdata.lock = NULL;

	/* Reset software state so the next firstopen starts clean. */
	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	DRM_DEBUG("lastclose completed\n");
}
349 
/*
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.  Called when the last open file on
 * the device is released.
 *
 * \sa drm_device
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	/* Give the driver first crack at tearing down its own state. */
	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	/* KMS (MODESET) drivers keep their interrupt handler installed. */
	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	/* Drop the cached unique (bus id) string. */
	if (dev->unique) {
		kfree(dev->unique);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_dma_takedown(dev);

	/* The hardware lock went away with the SHM segment; wake waiters. */
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		wakeup(&dev->lock.lock_queue);
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);
}
396 
397 /**
398  * drm_release - release method for DRM file
399  * @inode: device inode
400  * @filp: file pointer.
401  *
402  * This function must be used by drivers as their .release() #file_operations
403  * method. It frees any resources associated with the open file, and if this is
404  * the last open file for the DRM device also proceeds to call drm_lastclose().
405  *
406  * RETURNS:
407  *
408  * Always succeeds and returns 0.
409  */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

#ifdef __DragonFly__
	/*
	 * dev is not correctly set yet, so bail out immediately.
	 * Presumably the real per-file teardown happens through the
	 * drm_cdevpriv_dtor() registered in drm_open_helper() — TODO
	 * confirm.  Everything below this return is currently dead
	 * code, kept in sync with the Linux implementation.
	 */
	return 0;
#endif

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Unlink this file from the device's open-file list. */
	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("\n");

	/* Legacy/UMS: release any hardware lock held by this file. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Orphan pending events and free unread ones. */
	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);

	if (drm_is_primary_client(file_priv))
		drm_master_release(file_priv);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	/* drm_events_release() above must have emptied the unread queue. */
	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	/* Last open file on the device: run full lastclose processing. */
	if (!--dev->open_count) {
		drm_lastclose(dev);
#if 0	/* XXX: drm_put_dev() not implemented */
		if (drm_device_is_unplugged(dev))
			drm_put_dev(dev);
#endif
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
488 
489 /**
490  * drm_read - read method for DRM file
491  * @filp: file pointer
492  * @buffer: userspace destination pointer for the read
493  * @count: count in bytes to read
494  * @offset: offset to read
495  *
496  * This function must be used by drivers as their .read() #file_operations
497  * method iff they use DRM events for asynchronous signalling to userspace.
498  * Since events are used by the KMS API for vblank and page flip completion this
499  * means all modern display drivers must use it.
500  *
501  * @offset is ignore, DRM events are read like a pipe. Therefore drivers also
502  * must set the .llseek() #file_operation to no_llseek(). Polling support is
503  * provided by drm_poll().
504  *
505  * This function will only ever read a full event. Therefore userspace must
506  * supply a big enough buffer to fit any event to ensure forward progress. Since
507  * the maximum event space is currently 4K it's recommended to just use that for
508  * safety.
509  *
510  * RETURNS:
511  *
512  * Number of bytes read (always aligned to full events, and can be 0) or a
513  * negative error code on failure.
514  */
515 /*
516 ssize_t drm_read(struct file *filp, char __user *buffer,
517 		 size_t count, loff_t *offset)
518 */
/*
 * DragonFly dev_ops read method: drain queued DRM events into the
 * caller's buffer via uiomove().  Only whole events are copied out;
 * an event that does not fit in the remaining space is requeued.
 */
int drm_read(struct dev_read_args *ap)
{
	struct file *filp = ap->a_fp;
	struct cdev *kdev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	size_t count = uio->uio_resid;
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int ret = 0;	/* drm_read() returns int in DragonFly */

	/* Serialize readers on this file. */
	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Pop the oldest queued event, reclaiming its space. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* Queue drained; ret holds the bytes copied so far. */
			if (ret) {
				ret = 0;	/* DragonFly expects a zero return value on success */
				break;
			}

			if (filp->f_flag & O_NONBLOCK) {
				/*
				 * NOTE(review): this is a Linux-style
				 * negative errno, while the early-return
				 * paths above use the DragonFly positive
				 * convention — verify what the devfs
				 * caller expects.
				 */
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping for events. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			/*
			 * Never split an event: requeue it if it won't fit.
			 * NOTE(review): breaking here with ret > 0 returns
			 * the byte count instead of 0/errno — confirm
			 * against the dev_read return convention.
			 */
			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				break;
			}

			/* Copy the event out; on fault, put it back. */
			if (uiomove((caddr_t)e->event, length, uio)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			/* Event delivered: account for it and free it. */
			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
590 
591 /**
592  * drm_poll - poll method for DRM file
593  * @filp: file pointer
594  * @wait: poll waiter table
595  *
596  * This function must be used by drivers as their .poll() #file_operations
597  * method iff they use DRM events for asynchronous signalling to userspace.
598  * Since events are used by the KMS API for vblank and page flip completion this
599  * means all modern display drivers must use it.
600  *
601  * See also drm_read().
602  *
603  * RETURNS:
604  *
605  * Mask of POLL flags indicating the current status of the file.
606  */
607 
608 static int
609 drmfilt(struct knote *kn, long hint)
610 {
611 	struct drm_file *file_priv = (struct drm_file *)kn->kn_hook;
612 	int ready = 0;
613 
614 //	poll_wait(filp, &file_priv->event_wait, wait);
615 
616 	if (!list_empty(&file_priv->event_list))
617 		ready = 1;
618 
619 	return (ready);
620 }
621 
622 static void
623 drmfilt_detach(struct knote *kn)
624 {
625 	struct drm_file *file_priv;
626 	struct klist *klist;
627 
628 	file_priv = (struct drm_file *)kn->kn_hook;
629 
630 	klist = &file_priv->dkq.ki_note;
631 	knote_remove(klist, kn);
632 }
633 
/* Filter ops shared by the EVFILT_READ and EVFILT_WRITE kqueue filters. */
static struct filterops drmfiltops =
        { FILTEROP_MPSAFE | FILTEROP_ISFD, NULL, drmfilt_detach, drmfilt };
636 
637 int
638 drm_kqfilter(struct dev_kqfilter_args *ap)
639 {
640 	struct file *filp = ap->a_fp;
641 	struct drm_file *file_priv = filp->private_data;
642 	struct knote *kn = ap->a_kn;
643 	struct klist *klist;
644 
645 	ap->a_result = 0;
646 
647 	switch (kn->kn_filter) {
648 	case EVFILT_READ:
649 	case EVFILT_WRITE:
650 		kn->kn_fop = &drmfiltops;
651 		kn->kn_hook = (caddr_t)file_priv;
652 		break;
653 	default:
654 		ap->a_result = EOPNOTSUPP;
655 		return (0);
656 	}
657 
658 	klist = &file_priv->dkq.ki_note;
659 	knote_insert(klist, kn);
660 
661 	return (0);
662 }
663 
664 /**
665  * drm_event_reserve_init_locked - init a DRM event and reserve space for it
666  * @dev: DRM device
667  * @file_priv: DRM file private data
668  * @p: tracking structure for the pending event
669  * @e: actual event data to deliver to userspace
670  *
671  * This function prepares the passed in event for eventual delivery. If the event
672  * doesn't get delivered (because the IOCTL fails later on, before queuing up
673  * anything) then the event must be cancelled and freed using
674  * drm_event_cancel_free(). Successfully initialized events should be sent out
675  * using drm_send_event() or drm_send_event_locked() to signal completion of the
676  * asynchronous event to userspace.
677  *
678  * If callers embedded @p into a larger structure it must be allocated with
679  * kmalloc and @p must be the first member element.
680  *
681  * This is the locked version of drm_event_reserve_init() for callers which
682  * already hold &drm_device.event_lock.
683  *
684  * RETURNS:
685  *
686  * 0 on success or a negative error code on failure.
687  */
688 int drm_event_reserve_init_locked(struct drm_device *dev,
689 				  struct drm_file *file_priv,
690 				  struct drm_pending_event *p,
691 				  struct drm_event *e)
692 {
693 	if (file_priv->event_space < e->length)
694 		return -ENOMEM;
695 
696 	file_priv->event_space -= e->length;
697 
698 	p->event = e;
699 	list_add(&p->pending_link, &file_priv->pending_event_list);
700 	p->file_priv = file_priv;
701 
702 	return 0;
703 }
704 EXPORT_SYMBOL(drm_event_reserve_init_locked);
705 
706 /**
707  * drm_event_reserve_init - init a DRM event and reserve space for it
708  * @dev: DRM device
709  * @file_priv: DRM file private data
710  * @p: tracking structure for the pending event
711  * @e: actual event data to deliver to userspace
712  *
713  * This function prepares the passed in event for eventual delivery. If the event
714  * doesn't get delivered (because the IOCTL fails later on, before queuing up
715  * anything) then the event must be cancelled and freed using
716  * drm_event_cancel_free(). Successfully initialized events should be sent out
717  * using drm_send_event() or drm_send_event_locked() to signal completion of the
718  * asynchronous event to userspace.
719  *
720  * If callers embedded @p into a larger structure it must be allocated with
721  * kmalloc and @p must be the first member element.
722  *
723  * Callers which already hold &drm_device.event_lock should use
724  * drm_event_reserve_init_locked() instead.
725  *
726  * RETURNS:
727  *
728  * 0 on success or a negative error code on failure.
729  */
730 int drm_event_reserve_init(struct drm_device *dev,
731 			   struct drm_file *file_priv,
732 			   struct drm_pending_event *p,
733 			   struct drm_event *e)
734 {
735 	unsigned long flags;
736 	int ret;
737 
738 	spin_lock_irqsave(&dev->event_lock, flags);
739 	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
740 	spin_unlock_irqrestore(&dev->event_lock, flags);
741 
742 	return ret;
743 }
744 EXPORT_SYMBOL(drm_event_reserve_init);
745 
746 /**
747  * drm_event_cancel_free - free a DRM event and release it's space
748  * @dev: DRM device
749  * @p: tracking structure for the pending event
750  *
751  * This function frees the event @p initialized with drm_event_reserve_init()
752  * and releases any allocated space.
753  */
754 void drm_event_cancel_free(struct drm_device *dev,
755 			   struct drm_pending_event *p)
756 {
757 	unsigned long flags;
758 	spin_lock_irqsave(&dev->event_lock, flags);
759 	if (p->file_priv) {
760 		p->file_priv->event_space += p->event->length;
761 		list_del(&p->pending_link);
762 	}
763 	spin_unlock_irqrestore(&dev->event_lock, flags);
764 	kfree(p);
765 }
766 EXPORT_SYMBOL(drm_event_cancel_free);
767 
768 /**
769  * drm_send_event_locked - send DRM event to file descriptor
770  * @dev: DRM device
771  * @e: DRM event to deliver
772  *
773  * This function sends the event @e, initialized with drm_event_reserve_init(),
774  * to its associated userspace DRM file. Callers must already hold
775  * &drm_device.event_lock, see drm_send_event() for the unlocked version.
776  *
777  * Note that the core will take care of unlinking and disarming events when the
778  * corresponding DRM file is closed. Drivers need not worry about whether the
779  * DRM file for this event still exists and can call this function upon
780  * completion of the asynchronous work unconditionally.
781  */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	/* Signal, then release, any attached completion. */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	/* Likewise signal and drop any attached fence. */
	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/*
	 * The owning file was closed before the event fired
	 * (drm_events_release() set file_priv to NULL) — nobody can
	 * ever read it, so free it here.
	 */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move the event from the pending list to the readable queue. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
#ifdef __DragonFly__
	/* Also wake kqueue watchers attached via drm_kqfilter(). */
	KNOTE(&e->file_priv->dkq.ki_note, 0);
#endif

}
EXPORT_SYMBOL(drm_send_event_locked);
812 
813 /**
814  * drm_send_event - send DRM event to file descriptor
815  * @dev: DRM device
816  * @e: DRM event to deliver
817  *
818  * This function sends the event @e, initialized with drm_event_reserve_init(),
819  * to its associated userspace DRM file. This function acquires
820  * &drm_device.event_lock, see drm_send_event_locked() for callers which already
821  * hold this lock.
822  *
823  * Note that the core will take care of unlinking and disarming events when the
824  * corresponding DRM file is closed. Drivers need not worry about whether the
825  * DRM file for this event still exists and can call this function upon
826  * completion of the asynchronous work unconditionally.
827  */
828 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
829 {
830 	unsigned long irqflags;
831 
832 	spin_lock_irqsave(&dev->event_lock, irqflags);
833 	drm_send_event_locked(dev, e);
834 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
835 }
836 EXPORT_SYMBOL(drm_send_event);
837