1 /* 2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved. 3 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert 4 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elishcer, 5 * All rights reserved. 6 * Copyright (c) 1982, 1986, 1991, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * $DragonFly: src/sys/kern/kern_device.c,v 1.27 2007/07/23 18:59:50 dillon Exp $ 31 */ 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/sysctl.h> 36 #include <sys/systm.h> 37 #include <sys/module.h> 38 #include <sys/malloc.h> 39 #include <sys/conf.h> 40 #include <sys/bio.h> 41 #include <sys/buf.h> 42 #include <sys/vnode.h> 43 #include <sys/queue.h> 44 #include <sys/device.h> 45 #include <sys/tree.h> 46 #include <sys/syslink_rpc.h> 47 #include <sys/proc.h> 48 #include <machine/stdarg.h> 49 #include <sys/thread2.h> 50 51 /* 52 * system link descriptors identify the command in the 53 * arguments structure. 54 */ 55 #define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc) 56 57 #define DEVOP_DESC_INIT(name) \ 58 struct syslink_desc DDESCNAME(name) = { \ 59 __offsetof(struct dev_ops, __CONCAT(d_, name)), \ 60 #name } 61 62 DEVOP_DESC_INIT(default); 63 DEVOP_DESC_INIT(open); 64 DEVOP_DESC_INIT(close); 65 DEVOP_DESC_INIT(read); 66 DEVOP_DESC_INIT(write); 67 DEVOP_DESC_INIT(ioctl); 68 DEVOP_DESC_INIT(dump); 69 DEVOP_DESC_INIT(psize); 70 DEVOP_DESC_INIT(poll); 71 DEVOP_DESC_INIT(mmap); 72 DEVOP_DESC_INIT(strategy); 73 DEVOP_DESC_INIT(kqfilter); 74 DEVOP_DESC_INIT(clone); 75 76 /* 77 * Misc default ops 78 */ 79 struct dev_ops dead_dev_ops; 80 81 struct dev_ops default_dev_ops = { 82 { "null" }, 83 .d_default = NULL, /* must be NULL */ 84 .d_open = noopen, 85 .d_close = noclose, 86 .d_read = noread, 87 .d_write = nowrite, 88 .d_ioctl = noioctl, 89 .d_poll = nopoll, 90 .d_mmap = nommap, 91 .d_strategy = nostrategy, 92 .d_dump = nodump, 93 .d_psize = nopsize, 94 .d_kqfilter = nokqfilter, 95 .d_clone = noclone 96 }; 97 98 /************************************************************************ 99 * GENERAL DEVICE API FUNCTIONS * 100 ************************************************************************/ 101 102 int 103 dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred) 104 { 105 struct dev_open_args ap; 
106 107 ap.a_head.a_desc = &dev_open_desc; 108 ap.a_head.a_dev = dev; 109 ap.a_oflags = oflags; 110 ap.a_devtype = devtype; 111 ap.a_cred = cred; 112 return(dev->si_ops->d_open(&ap)); 113 } 114 115 int 116 dev_dclose(cdev_t dev, int fflag, int devtype) 117 { 118 struct dev_close_args ap; 119 120 ap.a_head.a_desc = &dev_close_desc; 121 ap.a_head.a_dev = dev; 122 ap.a_fflag = fflag; 123 ap.a_devtype = devtype; 124 return(dev->si_ops->d_close(&ap)); 125 } 126 127 int 128 dev_dread(cdev_t dev, struct uio *uio, int ioflag) 129 { 130 struct dev_read_args ap; 131 int error; 132 133 ap.a_head.a_desc = &dev_read_desc; 134 ap.a_head.a_dev = dev; 135 ap.a_uio = uio; 136 ap.a_ioflag = ioflag; 137 error = dev->si_ops->d_read(&ap); 138 if (error == 0) 139 dev->si_lastread = time_second; 140 return (error); 141 } 142 143 int 144 dev_dwrite(cdev_t dev, struct uio *uio, int ioflag) 145 { 146 struct dev_write_args ap; 147 int error; 148 149 dev->si_lastwrite = time_second; 150 ap.a_head.a_desc = &dev_write_desc; 151 ap.a_head.a_dev = dev; 152 ap.a_uio = uio; 153 ap.a_ioflag = ioflag; 154 error = dev->si_ops->d_write(&ap); 155 return (error); 156 } 157 158 int 159 dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred) 160 { 161 struct dev_ioctl_args ap; 162 163 ap.a_head.a_desc = &dev_ioctl_desc; 164 ap.a_head.a_dev = dev; 165 ap.a_cmd = cmd; 166 ap.a_data = data; 167 ap.a_fflag = fflag; 168 ap.a_cred = cred; 169 return(dev->si_ops->d_ioctl(&ap)); 170 } 171 172 int 173 dev_dpoll(cdev_t dev, int events) 174 { 175 struct dev_poll_args ap; 176 int error; 177 178 ap.a_head.a_desc = &dev_poll_desc; 179 ap.a_head.a_dev = dev; 180 ap.a_events = events; 181 error = dev->si_ops->d_poll(&ap); 182 if (error == 0) 183 return(ap.a_events); 184 return (seltrue(dev, events)); 185 } 186 187 int 188 dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot) 189 { 190 struct dev_mmap_args ap; 191 int error; 192 193 ap.a_head.a_desc = &dev_mmap_desc; 194 ap.a_head.a_dev = dev; 195 
ap.a_offset = offset; 196 ap.a_nprot = nprot; 197 error = dev->si_ops->d_mmap(&ap); 198 if (error == 0) 199 return(ap.a_result); 200 return(-1); 201 } 202 203 int 204 dev_dclone(cdev_t dev) 205 { 206 struct dev_clone_args ap; 207 208 ap.a_head.a_desc = &dev_clone_desc; 209 ap.a_head.a_dev = dev; 210 return (dev->si_ops->d_clone(&ap)); 211 } 212 213 /* 214 * Core device strategy call, used to issue I/O on a device. There are 215 * two versions, a non-chained version and a chained version. The chained 216 * version reuses a BIO set up by vn_strategy(). The only difference is 217 * that, for now, we do not push a new tracking structure when chaining 218 * from vn_strategy. XXX this will ultimately have to change. 219 */ 220 void 221 dev_dstrategy(cdev_t dev, struct bio *bio) 222 { 223 struct dev_strategy_args ap; 224 struct bio_track *track; 225 226 ap.a_head.a_desc = &dev_strategy_desc; 227 ap.a_head.a_dev = dev; 228 ap.a_bio = bio; 229 230 KKASSERT(bio->bio_track == NULL); 231 KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE); 232 if (bio->bio_buf->b_cmd == BUF_CMD_READ) 233 track = &dev->si_track_read; 234 else 235 track = &dev->si_track_write; 236 atomic_add_int(&track->bk_active, 1); 237 bio->bio_track = track; 238 (void)dev->si_ops->d_strategy(&ap); 239 } 240 241 void 242 dev_dstrategy_chain(cdev_t dev, struct bio *bio) 243 { 244 struct dev_strategy_args ap; 245 246 KKASSERT(bio->bio_track != NULL); 247 ap.a_head.a_desc = &dev_strategy_desc; 248 ap.a_head.a_dev = dev; 249 ap.a_bio = bio; 250 (void)dev->si_ops->d_strategy(&ap); 251 } 252 253 /* 254 * note: the disk layer is expected to set count, blkno, and secsize before 255 * forwarding the message. 
 */
/*
 * Issue the device's d_dump method.  count/blkno/secsize are zeroed here;
 * the disk layer fills them in before forwarding (see comment above).
 */
int
dev_ddump(cdev_t dev)
{
	struct dev_dump_args ap;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	return(dev->si_ops->d_dump(&ap));
}

/*
 * Issue the device's d_psize method.  Returns the reported size on
 * success, or -1 on error.
 */
int64_t
dev_dpsize(cdev_t dev)
{
	struct dev_psize_args ap;
	int error;

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;
	error = dev->si_ops->d_psize(&ap);
	if (error == 0)
		return (ap.a_result);
	return(-1);
}

/*
 * Issue the device's d_kqfilter method.  Returns the filter result on
 * success, or ENODEV on error.
 */
int
dev_dkqfilter(cdev_t dev, struct knote *kn)
{
	struct dev_kqfilter_args ap;
	int error;

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;
	ap.a_kn = kn;
	error = dev->si_ops->d_kqfilter(&ap);
	if (error == 0)
		return(ap.a_result);
	return(ENODEV);
}

/************************************************************************
 *			DEVICE HELPER FUNCTIONS			*
 ************************************************************************/

/* Return the device's current sysref reference count. */
int
dev_drefs(cdev_t dev)
{
	return(dev->si_sysref.refcnt);
}

/* Return the name recorded in the device's ops header. */
const char *
dev_dname(cdev_t dev)
{
	return(dev->si_ops->head.name);
}

/* Return the flags recorded in the device's ops header. */
int
dev_dflags(cdev_t dev)
{
	return(dev->si_ops->head.flags);
}

/* Return the major number recorded in the device's ops header. */
int
dev_dmaj(cdev_t dev)
{
	return(dev->si_ops->head.maj);
}

/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 *
 * The target method is located generically: the descriptor's sd_offset is
 * the byte offset of the function pointer within the device's dev_ops
 * (see DEVOP_DESC_INIT).
 */
int
dev_doperate(struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);

	func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);
	return (func(ap));
}

/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
int
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);

	/* dispatch via the supplied ops table instead of a_dev->si_ops */
	func = *(void **)((char *)ops + ap->a_desc->sd_offset);
	return (func(ap));
}

/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 *
 * Walks every function-pointer slot between dev_ops_first_field and
 * dev_ops_last_field; NULL slots are replaced by the template's
 * d_default (if set) or by the matching slot of default_dev_ops.
 */
void
compile_dev_ops(struct dev_ops *ops)
{
	int offset;

	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
	) {
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			if (ops->d_default)
				*func_p = ops->d_default;
			else
				*func_p = *def_p;
		}
	}
}

/************************************************************************
 *			MAJOR/MINOR SPACE FUNCTION			*
 ************************************************************************/

/*
 * This makes a dev_ops entry visible to userland (e.g /dev/<blah>).
 *
 * The kernel can overload a data space by making multiple dev_ops_add()
 * calls, but only the most recent one in the list matching the mask/match
 * will be visible to userland.
 *
 * make_dev() does not automatically call dev_ops_add() (nor do we want it
 * to, since partition-managed disk devices are overloaded on top of the
 * raw device).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.
 * However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * interference.
 */

/* RB-tree ordering: compare dev_ops_maj nodes by major number. */
static
int
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
{
	if (a->maj < b->maj)
		return(-1);
	else if (a->maj > b->maj)
		return(1);
	return(0);
}

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare, int, maj);

/* Red-black tree of per-major nodes, each holding a list of mask/match links. */
struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);

/*
 * Install a (mask, match) -> ops mapping under the ops' major number,
 * allocating a dynamic major (>= 256) if the template left maj < 0.
 * See the block comment above for the mask/match semantics.
 */
int
dev_ops_add(struct dev_ops *ops, u_int mask, u_int match)
{
	static int next_maj = 256;		/* first dynamic major number */
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;

	compile_dev_ops(ops);
	if (ops->head.maj < 0) {
		/* find a free dynamic major; wrap back to 256 on overflow */
		while (dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, next_maj) != NULL) {
			if (++next_maj <= 0)
				next_maj = 256;
		}
		ops->head.maj = next_maj;
	}
	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
	if (rbmaj == NULL) {
		rbmaj = kmalloc(sizeof(*rbmaj), M_DEVBUF, M_INTWAIT | M_ZERO);
		rbmaj->maj = ops->head.maj;
		dev_ops_rb_tree_RB_INSERT(&dev_ops_rbhead, rbmaj);
	}
	for (link = rbmaj->link; link; link = link->next) {
		/*
		 * If we get an exact match we usurp the target, but we only print
		 * a warning message if a different device switch is installed.
		 */
		if (link->mask == mask && link->match == match) {
			if (link->ops != ops) {
				kprintf("WARNING: \"%s\" (%p) is usurping \"%s\"'s"
					" (%p)\n",
					ops->head.name, ops,
					link->ops->head.name, link->ops);
				link->ops = ops;
				++ops->head.refs;
			}
			return(0);
		}
		/*
		 * XXX add additional warnings for overlaps
		 */
	}

	/* no existing link matched: push a fresh one at the list head */
	link = kmalloc(sizeof(struct dev_ops_link), M_DEVBUF, M_INTWAIT|M_ZERO);
	link->mask = mask;
	link->match = match;
	link->ops = ops;
	link->next = rbmaj->link;
	rbmaj->link = link;
	++ops->head.refs;
	return(0);
}

/*
 * Should only be used by udev2dev().
 *
 * If the minor number is -1, we match the first ops we find for this
 * major.  If the mask is not -1 then multiple minor numbers can match
 * the same ops.
 *
 * Note that this function will return NULL if the minor number is not within
 * the bounds of the installed mask(s).
 *
 * The specified minor number should NOT include any major bits.
 */
struct dev_ops *
dev_ops_get(int x, int y)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, x);
	if (rbmaj == NULL)
		return(NULL);
	for (link = rbmaj->link; link; link = link->next) {
		if (y == -1 || (link->mask & y) == link->match)
			return(link->ops);
	}
	return(NULL);
}

/*
 * Take a cookie cutter to the major/minor device space for the passed
 * device and generate a new dev_ops visible to userland which the caller
 * can then modify.  The original device is not modified but portions of
 * its major/minor space will no longer be visible to userland.
 */
struct dev_ops *
dev_ops_add_override(cdev_t backing_dev, struct dev_ops *template,
		     u_int mask, u_int match)
{
	struct dev_ops *ops;
	struct dev_ops *backing_ops = backing_dev->si_ops;

	/* clone the template, then inherit identity from the backing device */
	ops = kmalloc(sizeof(struct dev_ops), M_DEVBUF, M_INTWAIT);
	*ops = *template;
	ops->head.name = backing_ops->head.name;
	ops->head.maj = backing_ops->head.maj;
	/* inherit backing flags except D_TRACKCLOSE */
	ops->head.flags |= backing_ops->head.flags & ~D_TRACKCLOSE;
	compile_dev_ops(ops);
	dev_ops_add(ops, mask, match);

	return(ops);
}

/*
 * Undo a dev_ops_add_override().  The override ops is freed only when no
 * refs remain; otherwise a diagnostic is printed and the structure leaks
 * (intentionally - it may still be referenced).
 */
void
dev_ops_remove_override(struct dev_ops *ops, u_int mask, u_int match)
{
	dev_ops_remove(ops, mask, match);
	if (ops->head.refs) {
		kprintf("dev_ops_remove_override: %s still has %d refs!\n",
			ops->head.name, ops->head.refs);
	} else {
		bzero(ops, sizeof(*ops));
		kfree(ops, M_DEVBUF);
	}
}

/*
 * Remove all matching dev_ops entries from the dev_ops_array[] major
 * array so no new user opens can be performed, and destroy all devices
 * installed in the hash table that are associated with this dev_ops.  (see
 * destroy_all_devs()).
 *
 * The mask and match should match a previous call to dev_ops_add*().
 */
int
dev_ops_remove(struct dev_ops *ops, u_int mask, u_int match)
{
	struct dev_ops_maj *rbmaj;
	struct dev_ops_link *link;
	struct dev_ops_link **plink;

	if (ops != &dead_dev_ops)
		destroy_all_devs(ops, mask, match);

	rbmaj = dev_ops_rb_tree_RB_LOOKUP(&dev_ops_rbhead, ops->head.maj);
	if (rbmaj == NULL) {
		kprintf("double-remove of dev_ops %p for %s(%d)\n",
			ops, ops->head.name, ops->head.maj);
		return(0);
	}
	/*
	 * Walk the link list via a pointer-to-pointer so the matching
	 * entry can be unlinked in place below.
	 */
	for (plink = &rbmaj->link; (link = *plink) != NULL;
	     plink = &link->next) {
		if (link->mask == mask && link->match == match) {
			if (link->ops == ops)
				break;
			kprintf("%s: ERROR: cannot remove dev_ops, "
				"its major number %d was stolen by %s\n",
				ops->head.name, ops->head.maj,
				link->ops->head.name
			);
		}
	}
	if (link == NULL) {
		kprintf("%s(%d)[%08x/%08x]: WARNING: ops removed "
			"multiple times!\n",
			ops->head.name, ops->head.maj, mask, match);
	} else {
		*plink = link->next;
		--ops->head.refs; /* XXX ops_release() / record refs */
		kfree(link, M_DEVBUF);
	}

	/*
	 * Scrap the RB tree node for the major number if no ops are
	 * installed any longer.
	 */
	if (rbmaj->link == NULL) {
		dev_ops_rb_tree_RB_REMOVE(&dev_ops_rbhead, rbmaj);
		kfree(rbmaj, M_DEVBUF);
	}

#if 0
	/*
	 * The same ops might be used with multiple devices, so don't
	 * complain if the ref count is non-zero.
	 */
	if (ops->head.refs != 0) {
		kprintf("%s(%d)[%08x/%08x]: Warning: dev_ops_remove() called "
			"while %d device refs still exist!\n",
			ops->head.name, ops->head.maj, mask, match,
			ops->head.refs);
	} else {
		if (bootverbose)
			kprintf("%s: ops removed\n", ops->head.name);
	}
#endif
	return 0;
}

/*
 * dev_ops_scan() - Issue a callback for all installed dev_ops structures.
 *
 * The scan will terminate if a callback returns a negative number.
 */
struct dev_ops_scan_info {
	int	(*callback)(struct dev_ops *, void *);	/* user callback */
	void	*arg;					/* opaque user argument */
};

/*
 * Per-major RB-scan helper: invoke the user callback for every link under
 * this major.  A negative callback return aborts and is propagated;
 * otherwise the non-negative returns are summed.
 */
static
int
dev_ops_scan_callback(struct dev_ops_maj *rbmaj, void *arg)
{
	struct dev_ops_scan_info *info = arg;
	struct dev_ops_link *link;
	int count = 0;
	int r;

	for (link = rbmaj->link; link; link = link->next) {
		r = info->callback(link->ops, info->arg);
		if (r < 0)
			return(r);
		count += r;
	}
	return(count);
}

int
dev_ops_scan(int (*callback)(struct dev_ops *, void *), void *arg)
{
	struct dev_ops_scan_info info = { callback, arg };

	return (dev_ops_rb_tree_RB_SCAN(&dev_ops_rbhead, NULL,
					dev_ops_scan_callback, &info));
}


/*
 * Release a ops entry.  When the ref count reaches zero, recurse
 * through the stack.
 */
void
dev_ops_release(struct dev_ops *ops)
{
	--ops->head.refs;
	if (ops->head.refs == 0) {
		/* XXX */
	}
}

/*
 * Install an intercept ops vector on the device, copying the original's
 * maj/data/flags into it.  Returns the previous ops so the caller can
 * restore it with dev_ops_restore().
 */
struct dev_ops *
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
{
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;
	dev->si_ops = iops;
	dev->si_flags |= SI_INTERCEPTED;

	return (oops);
}

/*
 * Undo a dev_ops_intercept(): reinstall the original ops and clear the
 * identity fields copied into the intercept ops.
 */
void
dev_ops_restore(cdev_t dev, struct dev_ops *oops)
{
	struct dev_ops *iops = dev->si_ops;

	dev->si_ops = oops;
	dev->si_flags &= ~SI_INTERCEPTED;
	iops->head.maj = 0;
	iops->head.data = NULL;
	iops->head.flags = 0;
}

/************************************************************************
 *			DEFAULT DEV OPS FUNCTIONS			*
 ************************************************************************/


/*
 * Unsupported devswitch functions (e.g. for writing to read-only device).
 * XXX may belong elsewhere.
 */

/* Default d_clone: permit the clone without doing anything. */
int
noclone(struct dev_clone_args *ap)
{
	/* take no action */
	return (0);	/* allow the clone */
}

/* Default d_open: operation not supported. */
int
noopen(struct dev_open_args *ap)
{
	return (ENODEV);
}

/* Default d_close: operation not supported. */
int
noclose(struct dev_close_args *ap)
{
	return (ENODEV);
}

/* Default d_read: operation not supported. */
int
noread(struct dev_read_args *ap)
{
	return (ENODEV);
}

/* Default d_write: operation not supported. */
int
nowrite(struct dev_write_args *ap)
{
	return (ENODEV);
}

/* Default d_ioctl: operation not supported. */
int
noioctl(struct dev_ioctl_args *ap)
{
	return (ENODEV);
}

/* Default d_kqfilter: operation not supported. */
int
nokqfilter(struct dev_kqfilter_args *ap)
{
	return (ENODEV);
}

/* Default d_mmap: operation not supported. */
int
nommap(struct dev_mmap_args *ap)
{
	return (ENODEV);
}

/* Default d_poll: report no ready events, succeed. */
int
nopoll(struct dev_poll_args *ap)
{
	ap->a_events = 0;
	return(0);
}

/* Default d_strategy: fail the BIO with EOPNOTSUPP and complete it. */
int
nostrategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(bio);
	return(0);
}

/* Default d_psize: report size 0, succeed. */
int
nopsize(struct dev_psize_args *ap)
{
	ap->a_result = 0;
	return(0);
}

/* Default d_dump: operation not supported. */
int
nodump(struct dev_dump_args *ap)
{
	return (ENODEV);
}

/*
 * XXX this is probably bogus.  Any device that uses it isn't checking the
 * minor number.
 */
int
nullopen(struct dev_open_args *ap)
{
	return (0);
}

/* Always-succeed close, paired with nullopen(). */
int
nullclose(struct dev_close_args *ap)
{
	return (0);
}