1 /* 2 * \author Rickard E. (Rik) Faith <faith@valinux.com> 3 * \author Daryll Strauss <daryll@valinux.com> 4 * \author Gareth Hughes <gareth@valinux.com> 5 */ 6 7 /* 8 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com 9 * 10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 12 * All Rights Reserved. 13 * 14 * Permission is hereby granted, free of charge, to any person obtaining a 15 * copy of this software and associated documentation files (the "Software"), 16 * to deal in the Software without restriction, including without limitation 17 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 18 * and/or sell copies of the Software, and to permit persons to whom the 19 * Software is furnished to do so, subject to the following conditions: 20 * 21 * The above copyright notice and this permission notice (including the next 22 * paragraph) shall be included in all copies or substantial portions of the 23 * Software. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 31 * OTHER DEALINGS IN THE SOFTWARE. 
 */

#include <sys/types.h>
#include <sys/uio.h>	/* must come first to avoid kfree() macros issues */

#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <drm/drm_file.h>
#include <drm/drmP.h>

#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"

/* Defined below; used by the file-close/release paths. */
static void drm_events_release(struct drm_file *file_priv);

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 * that require 32/64 bit compatibility support must provide their own
 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 * drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank event and
 * page flip completions by the KMS API. But drivers can also use it for their
 * own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory.
Legacy drivers will use the deprecated drm_legacy_mmap()
 * function, modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
 * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example &file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
 * simpler.
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 */

static int drm_open_helper(struct cdev *kdev, int flags,
			   struct file *filp, struct drm_minor *minor);

/*
 * First-open setup: invoked by drm_open() when open_count goes 0 -> 1.
 *
 * For legacy (non-KMS) drivers that provide a firstopen hook, run it, then
 * set up the legacy DMA bookkeeping. Returns 0 on success or a negative
 * errno from the driver hook / drm_legacy_dma_setup().
 */
static int drm_setup(struct drm_device * dev)
{
	int ret;

	/* &drm_driver.firstopen is a legacy-only callback. */
	if (dev->driver->firstopen &&
	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;


	DRM_DEBUG("\n");
	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it.
It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
// drm_open() is a file_operations function, not a dev_ops function
// int drm_open(struct inode *inode, struct file *filp)
int drm_open(struct dev_open_args *ap)
{
#ifdef __DragonFly__
	/*
	 * NOTE(review): filp is NULL when ap->a_fpp is NULL, yet
	 * filp->f_data is dereferenced unconditionally below — confirm
	 * that a_fpp is always non-NULL on this open path.
	 */
	struct file *filp = ap->a_fpp ? *ap->a_fpp : NULL;
	struct inode *inode = filp->f_data;	/* A Linux inode is a Unix vnode */
	struct cdev *kdev = ap->a_head.a_dev;
	int flags = ap->a_oflags;
#endif
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;	/* true when this open takes open_count 0 -> 1 */

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
#if 0
	filp->f_mapping = dev->anon_inode->i_mapping;
#endif

#ifdef __DragonFly__
	retcode = drm_open_helper(kdev, flags, filp, minor);
#else
	retcode = drm_open_helper(kdev, flags, ap->a_fp, minor);
#endif
	if (retcode)
		goto err_undo;
	if (need_setup) {
		/* First opener performs one-time legacy/DMA setup. */
		retcode = drm_setup(dev);
		if (retcode)
			goto err_undo;
	}
#ifdef __DragonFly__
	device_busy(dev->dev->bsddev);
#endif
	return 0;

err_undo:
	/* Roll back the open_count bump and the minor reference. */
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

/*
 * close() function
 *
 * DragonFly dev_ops close entry point. Unlike the Linux split between
 * file_operations.release and the core, this version inlines the whole
 * drm_release() teardown sequence (see the matching drm_release() below).
 */
int
drm_close(struct dev_close_args *ap)
{
#ifdef __DragonFly__
	struct file *filp = ap->a_fp;
	struct inode *inode = filp->f_data;	/* A Linux inode is a Unix vnode */
#endif
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = drm_minor_acquire(iminor(inode));
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Unlink this file from the device's open-file list. */
	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%p, open_count = %d\n",
		  curproc->p_pid,
		  dev,
		  dev->open_count);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Drop any events still pending or queued for this file. */
	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);

	if (drm_is_primary_client(file_priv))
		drm_master_release(file_priv);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	/* drm_events_release() above must have emptied the queue. */
	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	/* Last close triggers the lastclose teardown. */
	if (!--dev->open_count) {
		drm_lastclose(dev);
#if 0	/* XXX: drm_put_dev() not implemented */
		if (drm_dev_is_unplugged(dev))
			drm_put_dev(dev);
#endif
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_close);

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and add it into the double linked list in \p dev.
 */
static int drm_open_helper(struct cdev *kdev, int flags,
			   struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, minor->index);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	filp->private_data = priv;
	priv->filp = filp;
	priv->pid = curproc->p_pid;
	priv->minor = minor;
	priv->dev = dev;

	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
	lockinit(&priv->fbs_lock, "dpfl", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&priv->blobs);
	INIT_LIST_HEAD(&priv->pending_event_list);
	INIT_LIST_HEAD(&priv->event_list);
	init_waitqueue_head(&priv->event_wait);
	priv->event_space = 4096;	/* set aside 4k for event buffer */

	lockinit(&priv->event_read_lock, "dperl", 0, LK_CANRECURSE);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, priv);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&priv->prime);

	if (dev->driver->open) {
		/* shared code returns -errno */
		ret = -dev->driver->open(dev, priv);
		if (ret != 0)
			goto out_prime_destroy;
	}

#ifdef __DragonFly__
	kdev->si_drv1 = dev;
#endif

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret)
			goto out_close;
	}

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;

	/* Error unwind: tear down in reverse order of setup. */
out_close:
	if (dev->driver->postclose)
		dev->driver->postclose(dev, priv);
out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&priv->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(priv);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, priv);
	put_pid(priv->pid);
	kfree(priv);
	filp->private_data = NULL;
	return ret;
}

/*
 * Detach and free all events associated with @file_priv.
 *
 * Pending (not yet sent) events are merely unlinked and orphaned
 * (file_priv = NULL) — their owner will free them via drm_send_event_locked()
 * or drm_event_cancel_free(). Events already queued for reading are freed
 * here. Runs under dev->event_lock.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/*
 * Reset legacy (non-KMS) device state on last close: uninstall the IRQ
 * handler and tear down AGP/SG/VMA/DMA bookkeeping under struct_mutex.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_vma_flush(dev);
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	DRM_DEBUG("lastclose completed\n");
}

/*
 * Invoked when the last file handle on the device is closed: run the
 * driver's lastclose hook, then reset legacy state if applicable.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

#ifdef __DragonFly__
	/* dev is not correctly set yet */
	/*
	 * NOTE(review): on DragonFly this early return makes everything
	 * below dead code — the real teardown happens inline in drm_close().
	 */
	return 0;
#endif

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Unlink this file from the device's open-file list. */
	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%p, open_count = %d\n",
		  curproc->p_pid,
		  dev,
		  dev->open_count);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Drop any events still pending or queued for this file. */
	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);

	if (drm_is_primary_client(file_priv))
		drm_master_release(file_priv);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	/* drm_events_release() above must have emptied the queue. */
	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	/* Last close triggers the lastclose teardown. */
	if (!--dev->open_count) {
		drm_lastclose(dev);
#if 0	/* XXX: drm_put_dev() not implemented */
		if (drm_dev_is_unplugged(dev))
			drm_put_dev(dev);
#endif
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the &file_operation.llseek to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
/*
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
*/
int drm_read(struct dev_read_args *ap)
{
	struct file *filp = ap->a_fp;
	struct uio *uio = ap->a_uio;
	size_t count = uio->uio_resid;	/* bytes the caller can accept */
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	int ret = 0;	/* drm_read() returns int in DragonFly */

	/* Serialize concurrent readers of the event queue. */
	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Dequeue the oldest event, reclaiming its space budget. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/*
			 * Here ret holds the bytes copied so far; if we
			 * already delivered something, report success
			 * (0 in DragonFly, the data went through uio).
			 */
			if (ret) {
				ret = 0;	/* DragonFly expects a zero return value on success */
				break;
			}

			if (filp->f_flag & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping for an event. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
					!list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			/* Event doesn't fit in what's left of the buffer. */
			if (length > count - ret) {
put_back_event:
				/* Requeue at the head and re-reserve space. */
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				break;
			}

			if (uiomove((caddr_t)e->event, length, uio)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait:
poll waiter table 679 * 680 * This function must be used by drivers as their &file_operations.read method 681 * iff they use DRM events for asynchronous signalling to userspace. Since 682 * events are used by the KMS API for vblank and page flip completion this means 683 * all modern display drivers must use it. 684 * 685 * See also drm_read(). 686 * 687 * RETURNS: 688 * 689 * Mask of POLL flags indicating the current status of the file. 690 */ 691 static int 692 drmfilt(struct knote *kn, long hint) 693 { 694 struct drm_file *file_priv = (struct drm_file *)kn->kn_hook; 695 int ready = 0; 696 697 // poll_wait(filp, &file_priv->event_wait, wait); 698 699 if (!list_empty(&file_priv->event_list)) 700 ready = 1; 701 702 return (ready); 703 } 704 705 static void 706 drmfilt_detach(struct knote *kn) 707 { 708 struct drm_file *file_priv; 709 struct klist *klist; 710 711 file_priv = (struct drm_file *)kn->kn_hook; 712 713 klist = &file_priv->dkq.ki_note; 714 knote_remove(klist, kn); 715 } 716 717 static struct filterops drmfiltops = 718 { FILTEROP_MPSAFE | FILTEROP_ISFD, NULL, drmfilt_detach, drmfilt }; 719 720 int 721 drm_kqfilter(struct dev_kqfilter_args *ap) 722 { 723 struct file *filp = ap->a_fp; 724 struct drm_file *file_priv = filp->private_data; 725 struct knote *kn = ap->a_kn; 726 struct klist *klist; 727 728 ap->a_result = 0; 729 730 switch (kn->kn_filter) { 731 case EVFILT_READ: 732 case EVFILT_WRITE: 733 kn->kn_fop = &drmfiltops; 734 kn->kn_hook = (caddr_t)file_priv; 735 break; 736 default: 737 ap->a_result = EOPNOTSUPP; 738 return (0); 739 } 740 741 klist = &file_priv->dkq.ki_note; 742 knote_insert(klist, kn); 743 744 return (0); 745 } 746 747 /** 748 * drm_event_reserve_init_locked - init a DRM event and reserve space for it 749 * @dev: DRM device 750 * @file_priv: DRM file private data 751 * @p: tracking structure for the pending event 752 * @e: actual event data to deliver to userspace 753 * 754 * This function prepares the passed in event for eventual 
delivery. If the event 755 * doesn't get delivered (because the IOCTL fails later on, before queuing up 756 * anything) then the even must be cancelled and freed using 757 * drm_event_cancel_free(). Successfully initialized events should be sent out 758 * using drm_send_event() or drm_send_event_locked() to signal completion of the 759 * asynchronous event to userspace. 760 * 761 * If callers embedded @p into a larger structure it must be allocated with 762 * kmalloc and @p must be the first member element. 763 * 764 * This is the locked version of drm_event_reserve_init() for callers which 765 * already hold &drm_device.event_lock. 766 * 767 * RETURNS: 768 * 769 * 0 on success or a negative error code on failure. 770 */ 771 int drm_event_reserve_init_locked(struct drm_device *dev, 772 struct drm_file *file_priv, 773 struct drm_pending_event *p, 774 struct drm_event *e) 775 { 776 if (file_priv->event_space < e->length) 777 return -ENOMEM; 778 779 file_priv->event_space -= e->length; 780 781 p->event = e; 782 list_add(&p->pending_link, &file_priv->pending_event_list); 783 p->file_priv = file_priv; 784 785 return 0; 786 } 787 EXPORT_SYMBOL(drm_event_reserve_init_locked); 788 789 /** 790 * drm_event_reserve_init - init a DRM event and reserve space for it 791 * @dev: DRM device 792 * @file_priv: DRM file private data 793 * @p: tracking structure for the pending event 794 * @e: actual event data to deliver to userspace 795 * 796 * This function prepares the passed in event for eventual delivery. If the event 797 * doesn't get delivered (because the IOCTL fails later on, before queuing up 798 * anything) then the even must be cancelled and freed using 799 * drm_event_cancel_free(). Successfully initialized events should be sent out 800 * using drm_send_event() or drm_send_event_locked() to signal completion of the 801 * asynchronous event to userspace. 
802 * 803 * If callers embedded @p into a larger structure it must be allocated with 804 * kmalloc and @p must be the first member element. 805 * 806 * Callers which already hold &drm_device.event_lock should use 807 * drm_event_reserve_init_locked() instead. 808 * 809 * RETURNS: 810 * 811 * 0 on success or a negative error code on failure. 812 */ 813 int drm_event_reserve_init(struct drm_device *dev, 814 struct drm_file *file_priv, 815 struct drm_pending_event *p, 816 struct drm_event *e) 817 { 818 unsigned long flags; 819 int ret; 820 821 spin_lock_irqsave(&dev->event_lock, flags); 822 ret = drm_event_reserve_init_locked(dev, file_priv, p, e); 823 spin_unlock_irqrestore(&dev->event_lock, flags); 824 825 return ret; 826 } 827 EXPORT_SYMBOL(drm_event_reserve_init); 828 829 /** 830 * drm_event_cancel_free - free a DRM event and release it's space 831 * @dev: DRM device 832 * @p: tracking structure for the pending event 833 * 834 * This function frees the event @p initialized with drm_event_reserve_init() 835 * and releases any allocated space. It is used to cancel an event when the 836 * nonblocking operation could not be submitted and needed to be aborted. 837 */ 838 void drm_event_cancel_free(struct drm_device *dev, 839 struct drm_pending_event *p) 840 { 841 unsigned long flags; 842 spin_lock_irqsave(&dev->event_lock, flags); 843 if (p->file_priv) { 844 p->file_priv->event_space += p->event->length; 845 list_del(&p->pending_link); 846 } 847 spin_unlock_irqrestore(&dev->event_lock, flags); 848 849 if (p->fence) 850 dma_fence_put(p->fence); 851 852 kfree(p); 853 } 854 EXPORT_SYMBOL(drm_event_cancel_free); 855 856 /** 857 * drm_send_event_locked - send DRM event to file descriptor 858 * @dev: DRM device 859 * @e: DRM event to deliver 860 * 861 * This function sends the event @e, initialized with drm_event_reserve_init(), 862 * to its associated userspace DRM file. Callers must already hold 863 * &drm_device.event_lock, see drm_send_event() for the unlocked version. 
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	/* Signal any in-kernel waiter and release the completion. */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	/* Signal and drop the attached fence, if any. */
	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/*
	 * file_priv == NULL means drm_events_release() orphaned this
	 * event: the reader is gone, so just free it.
	 */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move from the pending list to the readable event queue. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
#ifdef __DragonFly__
	/* Also notify kqueue watchers registered via drm_kqfilter(). */
	KNOTE(&e->file_priv->dkq.ki_note, 0);
#endif
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which already
 * hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
914 */ 915 void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) 916 { 917 unsigned long irqflags; 918 919 spin_lock_irqsave(&dev->event_lock, irqflags); 920 drm_send_event_locked(dev, e); 921 spin_unlock_irqrestore(&dev->event_lock, irqflags); 922 } 923 EXPORT_SYMBOL(drm_send_event); 924