/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/msgport.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/udev.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
#include <sys/sysref2.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DECLARE_CLONE_BITMAP(ops_id);

/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static void devfs_cdev_lock(cdev_t dev);
static void devfs_cdev_unlock(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
	.name =		"cdev",
	.mtype =	M_DEVFS,
	.proto =	SYSREF_PROTO_DEV,
	.offset =	offsetof(struct cdev, si_sysref),
	.objsize =	sizeof(struct cdev),
	.nom_cache =	32,
	.flags =	0,
	.ops = {
		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate,
		.lock = (sysref_lock_func_t)devfs_cdev_lock,
		.unlock = (sysref_unlock_func_t)devfs_cdev_unlock
	}
};

static struct objcache	*devfs_node_cache;
static struct objcache	*devfs_msg_cache;
static struct objcache	*devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
	sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
	sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
	sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
		TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
		TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
		TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock		devfs_lock;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread	*td_core;

static struct spinlock	ino_lock;
static ino_t	d_ino;
static int	devfs_debug_enable;
static int	devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_related_worker(cdev_t);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *, void *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
		char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_destroy_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *, int);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_related_flag_worker(cdev_t, uint32_t);
static int devfs_destroy_related_without_flag_worker(cdev_t, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);

/*
 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function
 * using kvprintf
 */
int
devfs_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= devfs_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters.  The node is also automatically linked into the topology
 * if a parent is specified.  It also applies the rules and aliases
 * to the new node.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
	struct devfs_node *node = NULL;
	size_t namlen = strlen(name);

	node = objcache_get(devfs_node_cache, M_WAITOK);
	bzero(node, sizeof(*node));

	atomic_add_long(&DEVFS_MNTDATA(mp)->leak_count, 1);

	node->d_dev = NULL;
	node->nchildren = 1;
	node->mp = mp;
	node->d_dir.d_ino = devfs_fetch_ino();

	/*
	 * Cookie jar for children.  Leave 0 and 1 for '.' and '..' entries
	 * respectively.
	 */
	node->cookie_jar = 2;

	/*
	 * Access Control members
	 */
	node->mode = DEVFS_DEFAULT_MODE;
	node->uid = DEVFS_DEFAULT_UID;
	node->gid = DEVFS_DEFAULT_GID;

	switch (devfsnodetype) {
	case Nroot:
		/*
		 * Ensure that we don't recycle the root vnode by marking it as
		 * linked into the topology.
		 */
		node->flags |= DEVFS_NODE_LINKED;
		/* fall through */
	case Ndir:
		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
		node->d_dir.d_type = DT_DIR;
		node->nchildren = 2;
		break;

	case Nlink:
		node->d_dir.d_type = DT_LNK;
		break;

	case Nreg:
		node->d_dir.d_type = DT_REG;
		break;

	case Ndev:
		if (dev != NULL) {
			node->d_dir.d_type = DT_CHR;
			node->d_dev = dev;

			node->mode = dev->si_perms;
			node->uid = dev->si_uid;
			node->gid = dev->si_gid;

			devfs_alias_check_create(node);
		}
		break;

	default:
		panic("devfs_allocp: unknown node type");
	}

	node->v_node = NULL;
	node->node_type = devfsnodetype;

	/* Initialize the dirent structure of each devfs vnode */
	node->d_dir.d_namlen = namlen;
	node->d_dir.d_name = kmalloc(namlen + 1, M_DEVFS, M_WAITOK);
	memcpy(node->d_dir.d_name, name, namlen);
	node->d_dir.d_name[namlen] = '\0';

	/* Initialize the parent node element */
	node->parent = parent;

	/* Initialize *time members */
	nanotime(&node->atime);
	node->mtime = node->ctime = node->atime;

	/*
	 * Associate with parent as last step, clean out namecache
	 * reference.
	 */
	if ((parent != NULL) &&
	    ((parent->node_type == Nroot) || (parent->node_type == Ndir))) {
		parent->nchildren++;
		node->cookie = parent->cookie_jar++;
		node->flags |= DEVFS_NODE_LINKED;
		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

		/* This forces negative namecache lookups to clear */
		++mp->mnt_namecache_gen;
	}

	/* Apply rules */
	devfs_rule_check_apply(node, NULL);

	atomic_add_long(&DEVFS_MNTDATA(mp)->file_count, 1);

	return node;
}
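
/*
 * Usage sketch (illustrative, not part of the original source): with
 * devfs_lock held exclusively, a caller can hang a new directory node
 * off an existing mount; "mnt" is a hypothetical devfs_mnt_data
 * pointer:
 *
 *	struct devfs_node *dir;
 *
 *	lockmgr(&devfs_lock, LK_EXCLUSIVE);
 *	dir = devfs_allocp(Ndir, "example", mnt->root_node, mnt->mp, NULL);
 *	lockmgr(&devfs_lock, LK_RELEASE);
 */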

/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

	/*
	 * devfs master lock must not be held across a vget() call, we have
	 * to hold our ad-hoc vp to avoid a free race from destroying the
	 * contents of the structure.  The vget() will interlock recycles
	 * for us.
	 */
try_again:
	while ((vp = node->v_node) != NULL) {
		vhold(vp);
		lockmgr(&devfs_lock, LK_RELEASE);
		error = vget(vp, LK_EXCLUSIVE);
		vdrop(vp);
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		if (error == 0) {
			*vpp = vp;
			goto out;
		}
		if (error != ENOENT) {
			*vpp = NULL;
			goto out;
		}
	}

	/*
	 * devfs master lock must not be held across a getnewvnode() call.
	 */
	lockmgr(&devfs_lock, LK_RELEASE);
	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		goto out;
	}
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vp = *vpp;

	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	switch (node->node_type) {
	case Nroot:
		vsetflags(vp, VROOT);
		/* fall through */
	case Ndir:
		vp->v_type = VDIR;
		break;

	case Nlink:
		vp->v_type = VLNK;
		break;

	case Nreg:
		vp->v_type = VREG;
		break;

	case Ndev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = node->d_dev->si_umajor;

		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp,
	      devfs_nodetype devfsnodetype, char *name,
	      struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 *
 * The core lock is not necessarily held on call and must be temporarily
 * released if it is to avoid a deadlock.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;
	int relock;

	KKASSERT(node);
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Nroot));

	/*
	 * Protect against double frees
	 */
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
	node->flags |= DEVFS_DESTROYED;

	/*
	 * Avoid deadlocks between devfs_lock and the vnode lock when
	 * disassociating the vnode (stress2 pty vs ls -la /dev/pts).
	 *
	 * This also prevents the vnode reclaim code from double-freeing
	 * the node.  The vget() is required to safely modify the vp
	 * and cycle the refs to terminate an inactive vp.
	 */
	if (lockstatus(&devfs_lock, curthread) == LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_RELEASE);
		relock = 1;
	} else {
		relock = 0;
	}

	while ((vp = node->v_node) != NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
			break;
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
		cache_inval_vp(vp, CINV_DESTROY);
		vput(vp);
	}

	/*
	 * Remaining cleanup
	 */
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->leak_count, 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	if (node->d_dir.d_name) {
		kfree(node->d_dir.d_name, M_DEVFS);
		node->d_dir.d_name = NULL;
	}
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->file_count, 1);
	objcache_put(devfs_node_cache, node);

	if (relock)
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

	return 0;
}

/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;

	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere
	 * and we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		node->flags &= ~DEVFS_NODE_LINKED;
	}

	node->parent = NULL;
	return 0;
}

void *
devfs_iterate_topology(struct devfs_node *node,
		       devfs_iterate_callback_t *callback, void *arg1)
{
	struct devfs_node *node1, *node2;
	void *ret = NULL;

	if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				if ((ret = devfs_iterate_topology(node1,
						callback, arg1)))
					return ret;
			}
		}
	}

	ret = callback(node, arg1);
	return ret;
}

/*
 * devfs_reaperp() is a recursive function that iterates through all the
 * topology, unlinking and freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
	devfs_unlinkp(node);
	devfs_freep(node);

	return NULL;
}

static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Ndir) {
		if ((node->nchildren == 2) &&
		    !(node->flags & DEVFS_USER_CREATED)) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return NULL;
}

static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
	if ((node->node_type == Nlink) && (node->link_target == target)) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}
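
/*
 * Usage sketch for devfs_iterate_topology() (illustrative): the
 * DEVFS_MOUNT_DEL handler below tears down an entire topology by
 * walking it with the reaper callback; custom walkers follow the same
 * pattern and may return non-NULL from the callback to abort the walk
 * early:
 *
 *	devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
 *			       NULL);
 */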

/*
 * devfs_gc() is devfs' garbage collector.  It takes care of unlinking and
 * freeing a node, but also removes empty directories and links that link
 * via the devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	if (node->nlinks > 0)
		devfs_iterate_topology(root_node,
		    (devfs_iterate_callback_t *)devfs_gc_links_callback,
		    node);

	devfs_unlinkp(node);
	devfs_iterate_topology(root_node,
	    (devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
	return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount.  It sends a synchronous message with the relevant details to
 * the devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_destroy_related() is the synchronous entry point for device
 * destruction by subname.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_destroy_related(cdev_t dev)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = dev;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_RELATED, msg);
	devfs_msg_put(msg);
	return 0;
}

int
devfs_clr_related_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_CLR_RELATED_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

int
devfs_destroy_related_without_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_RELATED_WO_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}
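
/*
 * Usage sketch (illustrative): a driver pairs the entry points above,
 * creating its node at attach time and destroying it at detach time.
 * UID_ROOT and GID_WHEEL are the usual <sys/conf.h> constants:
 *
 *	devfs_create_dev(dev, UID_ROOT, GID_WHEEL, 0600);
 *	...
 *	devfs_destroy_dev(dev);
 */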

/*
 * devfs_create_all_dev is the asynchronous entry point to trigger device
 * node creation.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
	return 0;
}

/*
 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
 * devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
	return 0;
}

/*
 * devfs_clone_handler_add is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = NULL;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char *target;
	__va_list ap;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);
	kvasfree(&target);

	return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
	cdev_t found = NULL;
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_udev = udev;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_find_device_by_udev found? %s -end:3-\n",
		    ((found) ? found->si_name : "NO"));
	return found;
}

struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
	struct vnode *vp = NULL;
	devfs_msg_t msg;

	if (mp == NULL)
		return NULL;

	msg = devfs_msg_get();
	msg->mdv_ino.mp = mp;
	msg->mdv_ino.ino = target;
	msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
	vp = msg->mdv_ino.vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_msg_put(msg);

	return vp;
}
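
/*
 * Usage sketch (illustrative): the name lookup takes a format string,
 * so unit-numbered devices can be resolved directly; "mydev" and
 * "unit" are hypothetical:
 *
 *	cdev_t found;
 *
 *	found = devfs_find_device_by_name("mydev%d", unit);
 *	if (found == NULL)
 *		return (ENOENT);
 */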

/*
 * devfs_make_alias is the asynchronous entry point to register an alias
 * for a device.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
	return 0;
}

/*
 * devfs_destroy_alias is the asynchronous entry point to deregister an
 * alias for a device.  It just sends a message with the relevant details
 * to the devfs core.
 */
int
devfs_destroy_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_DESTROY_ALIAS, alias);
	return 0;
}

/*
 * devfs_apply_rules is the asynchronous entry point to trigger application
 * of all rules.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_apply_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

	return 0;
}

/*
 * devfs_reset_rules is the asynchronous entry point to trigger reset of
 * all rules.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_reset_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

	return 0;
}

/*
 * devfs_scan_callback is the synchronous entry point to call a callback
 * on all cdevs.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback, void *arg)
{
	devfs_msg_t msg;

	KKASSERT(callback);

	msg = devfs_msg_get();
	msg->mdv_load = callback;
	msg->mdv_load2 = arg;
	msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get(void)
{
	return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
	objcache_put(devfs_msg_cache, msg);
	return 0;
}
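
/*
 * Sketch of the synchronous round trip used by the entry points above
 * (illustrative paraphrase of devfs_destroy_related()): allocate a
 * message, fill in the payload union member the command expects, send
 * it to the core and free the reply:
 *
 *	devfs_msg_t msg;
 *
 *	msg = devfs_msg_get();
 *	msg->mdv_load = dev;
 *	msg = devfs_msg_send_sync(DEVFS_DESTROY_RELATED, msg);
 *	devfs_msg_put(msg);
 */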

/*
 * devfs_msg_send is the generic asynchronous message sending facility
 * for devfs.  By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	if (port->mpu_td == curthread) {
		devfs_msg_exec(devfs_msg);
		lwkt_replymsg(&devfs_msg->hdr, 0);
	} else {
		lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	}
}

/*
 * devfs_msg_send_sync is the generic synchronous message sending
 * facility for devfs.  It initializes a local reply port and waits
 * for the core's answer.  This answer is then returned.
 */
devfs_msg_t
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
	struct lwkt_port rep_port;
	devfs_msg_t msg_incoming;
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	msg_incoming = lwkt_waitport(&rep_port, 0);

	return msg_incoming;
}

/*
 * sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_load = load;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_name = name;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_mnt = mnt;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_ops.ops = ops;
	devfs_msg->mdv_ops.minor = minor;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_chandler.name = name;
	devfs_msg->mdv_chandler.nhandler = handler;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_dev.dev = dev;
	devfs_msg->mdv_dev.uid = uid;
	devfs_msg->mdv_dev.gid = gid;
	devfs_msg->mdv_dev.perms = perms;

	devfs_msg_send(cmd, devfs_msg);
}

/*
 * sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_link.name = name;
	devfs_msg->mdv_link.target = target;
	devfs_msg->mdv_link.mp = mp;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * devfs_msg_core is the main devfs thread.  It handles all incoming
 * messages and calls the relevant worker functions.  Using messages
 * ensures that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	lwkt_initport_thread(&devfs_msg_port, curthread);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_run = 1;
	wakeup(td_core);
	lockmgr(&devfs_lock, LK_RELEASE);

	get_mplock();	/* mpsafe yet? */

	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_msg_core, new msg: %x\n",
			    (unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}

	rel_mplock();
	wakeup(td_core);

	lwkt_exit();
}

static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_RELATED:
		devfs_destroy_related_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
				       NULL);
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %ld devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
					  msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_DESTROY_ALIAS:
		devfs_destroy_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load,
					   msg->mdv_load2);
		break;
	case DEVFS_CLR_RELATED_FLAG:
		devfs_clr_related_flag_worker(msg->mdv_flags.dev,
					      msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_RELATED_WO_FLAG:
		devfs_destroy_related_without_flag_worker(msg->mdv_flags.dev,
							  msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_iterate_topology(
			DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
			(devfs_iterate_callback_t *)
			devfs_inode_to_vnode_worker_callback,
			&msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}

/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions.  It also calls devfs_propagate_dev which in turn propagates
 * the change to all mount points.
 *
 * The passed dev is already referenced.  This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	udev_event_attach(dev, NULL, 0);

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);

	udev_event_detach(dev, NULL, 0);

	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_related_worker(cdev_t needle)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related worker: %s\n",
		    needle->si_name);
	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_worker(dev);
			devfs_destroy_dev_worker(dev);
			goto restart;
		}
	}
	return 0;
}

static int
devfs_clr_related_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev, dev1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_parent == needle) {
			devfs_clr_related_flag_worker(dev, flag);
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

static int
devfs_destroy_related_without_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related_wo_flag: %s\n",
		    needle->si_name);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_without_flag_worker(dev, flag);
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "related_wo_flag: %s restart\n",
					    dev->si_name);
				goto restart;
			}
		}
	}

	return 0;
}

/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
	cdev_t dev;

	KKASSERT(root);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		devfs_create_device_node(root, dev, NULL, NULL);
	}

	return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor.  If minor is less than 0, it is not matched
 * against.  It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
	cdev_t dev, dev1;

	KKASSERT(ops);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_ops != ops)
			continue;
		if ((minor < 0) || (dev->si_uminor == minor)) {
			devfs_destroy_dev_worker(dev);
		}
	}

	return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
	struct devfs_clone_handler *chandler = NULL;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;

		if (!memcmp(chandler->name, name, len)) {
			/* Clonable basename already exists */
			return 1;
		}
	}

	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
	chandler->name = kstrdup(name, M_DEVFS);
	chandler->namlen = len;
	chandler->nhandler = nhandler;

	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
	return 0;
}

/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
	struct devfs_clone_handler *chandler, *chandler2;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
		if (chandler->namlen != len)
			continue;
		if (memcmp(chandler->name, name, len))
			continue;

		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
		kfree(chandler->name, M_DEVFS);
		kfree(chandler, M_DEVFS);
		break;
	}

	return 0;
}

/*
 * Worker function that finds a given device name and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
	struct devfs_alias *alias;
	cdev_t dev;
	cdev_t found = NULL;

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
			found = dev;
			break;
		}
	}
	if (found == NULL) {
		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
				found = alias->dev_target;
				break;
			}
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that finds a given device udev and changes
 * the message received accordingly so that when replied to,
 * the answer is returned to the caller.
 */
static int
devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
{
	cdev_t dev, dev1;
	cdev_t found = NULL;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
			found = dev;
			break;
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that inserts a given alias into the
 * alias list, and propagates the alias to all mount
 * points.
 */
static int
devfs_make_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	size_t len = strlen(alias->name);
	int found = 0;

	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (len != alias2->namlen)
			continue;

		if (!memcmp(alias->name, alias2->name, len)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		/*
		 * The alias doesn't exist yet, so we add it to the alias list
		 */
		TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
		devfs_alias_propagate(alias, 0);
		udev_event_attach(alias->dev_target, alias->name, 1);
	} else {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Warning: duplicate devfs_make_alias for %s\n",
			    alias->name);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}

	return 0;
}

/*
 * Worker function that deletes a given alias from the
 * alias list, and propagates the removal to all mount
 * points.
 */
static int
devfs_destroy_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	int found = 0;

	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (alias->dev_target != alias2->dev_target)
			continue;

		if (devfs_WildCmp(alias->name, alias2->name) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Warning: devfs_destroy_alias for nonexistent alias: %s\n",
			    alias->name);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	} else {
		/*
		 * The alias exists, so we delete it from the alias list
		 */
		TAILQ_REMOVE(&devfs_alias_list, alias2, link);
		devfs_alias_propagate(alias2, 1);
		udev_event_detach(alias2->dev_target, alias2->name, 1);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
		kfree(alias2->name, M_DEVFS);
		kfree(alias2, M_DEVFS);
	}

	return 0;
}

/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		TAILQ_REMOVE(&devfs_alias_list, alias, link);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}
	return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
 */
static int
devfs_alias_remove(cdev_t dev)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		if (alias->dev_target == dev) {
			TAILQ_REMOVE(&devfs_alias_list, alias, link);
			udev_event_detach(alias->dev_target, alias->name, 1);
			kfree(alias->name, M_DEVFS);
			kfree(alias, M_DEVFS);
		}
	}
	return 0;
}

/*
 * This function propagates an alias addition or removal to
 * all mount points.
 */
static int
devfs_alias_propagate(struct devfs_alias *alias, int remove)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (remove) {
			devfs_destroy_node(mnt->root_node, alias->name);
		} else {
			devfs_alias_apply(mnt->root_node, alias);
		}
	}
	return 0;
}

/*
 * This function is a recursive function iterating through
 * all device nodes in the topology and, if applicable,
 * creating the relevant alias for a device node.
 */
static int
devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
{
	struct devfs_node *node1, *node2;

	KKASSERT(alias != NULL);

	if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_alias_apply(node1, alias);
			}
		}
	} else {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}

/*
 * This function checks if any alias possibly is applicable
 * to the given node.  If so, the alias is created.
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
	struct devfs_alias *alias;

	TAILQ_FOREACH(alias, &devfs_alias_list, link) {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}

/*
 * This function creates an alias with a given name
 * linking to a given devfs node.  It also increments
 * the link count on the target node.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	char *create_path = NULL;
	char *name;
	char *name_buf;
	int result = 0;

	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		result = 1;
		goto done;
	}

	linknode = devfs_allocp(Nlink, name, parent, mp, NULL);
	if (linknode == NULL) {
		result = 1;
		goto done;
	}

	linknode->link_target = target;
	target->nlinks++;

	if (rule_based)
		linknode->flags |= DEVFS_RULE_CREATED;

done:
	kfree(name_buf, M_TEMP);
	return (result);
}
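
/*
 * Usage sketch (illustrative): registering a second name for an
 * existing device; the core then materializes an Nlink node for it on
 * every devfs mount.  The alias name is hypothetical:
 *
 *	devfs_make_alias("video", dev);
 */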

/*
 * This function is called by the core and handles mount point
 * strings.  It applies or resets the rules on either all mount
 * points or only on a specific one.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
	struct devfs_mnt_data *mnt;

	if (mountto[0] == '*') {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			devfs_iterate_topology(mnt->root_node,
			    (apply) ? (devfs_rule_check_apply)
				    : (devfs_rule_reset_node),
			    NULL);
		}
	} else {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
				devfs_iterate_topology(mnt->root_node,
				    (apply) ? (devfs_rule_check_apply)
					    : (devfs_rule_reset_node),
				    NULL);
				break;
			}
		}
	}

	kfree(mountto, M_DEVFS);
	return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback, void *arg)
{
	cdev_t dev, dev1;
	struct devfs_alias *alias, *alias1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		callback(dev->si_name, dev, false, arg);
	}
	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias1) {
		callback(alias->name, alias->dev_target, true, arg);
	}

	return 0;
}

/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
			    size_t name_len, int create)
{
	struct devfs_node *node, *found = NULL;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (name_len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
			found = node;
			break;
		}
	}

	if ((found == NULL) && (create)) {
		found = devfs_allocp(Ndir, dir_name, parent, parent->mp, NULL);
	}

	return found;
}

/*
 * This function tries to resolve a complete path.  If creation is
 * requested, any part of the path that cannot be resolved (because it
 * doesn't exist) is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
	struct devfs_node *node = parent;
	char *buf;
	size_t idx = 0;

	if (path == NULL)
		return parent;

	buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	while (*path && idx < PATH_MAX - 1) {
		if (*path != '/') {
			buf[idx++] = *path;
		} else {
			buf[idx] = '\0';
			node = devfs_resolve_or_create_dir(node, buf, idx,
							   create);
			if (node == NULL) {
				kfree(buf, M_TEMP);
				return NULL;
			}
			idx = 0;
		}
		++path;
	}
	buf[idx] = '\0';
	node = devfs_resolve_or_create_dir(node, buf, idx, create);
	kfree(buf, M_TEMP);
	return (node);
}
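
/*
 * Usage sketch (illustrative): resolving a nested path below a root
 * node with devfs_lock held, creating missing intermediate
 * directories on the fly; the "bus/smb" path is hypothetical:
 *
 *	struct devfs_node *dir;
 *
 *	dir = devfs_resolve_or_create_path(root, "bus/smb", 1);
 *	if (dir == NULL)
 *		return 1;
 */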

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp.  It
 * requires a working buffer with enough size to keep the whole
 * fullpath.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
	char *name = NULL;
	char *path = NULL;
	size_t len = strlen(fullpath) + 1;
	int i;

	KKASSERT((fullpath != NULL) && (buf != NULL));
	KKASSERT((pathp != NULL) && (namep != NULL));

	memcpy(buf, fullpath, len);

	for (i = len - 1; i >= 0; i--) {
		if (buf[i] == '/') {
			buf[i] = '\0';
			name = &(buf[i+1]);
			path = buf;
			break;
		}
	}

	*pathp = path;

	if (name) {
		*namep = name;
	} else {
		*namep = buf;
	}

	return 0;
}

/*
 * This function creates a new devfs node for a given device.  It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
			 char *dev_name, char *path_fmt, ...)
{
	struct devfs_node *parent, *node = NULL;
	char *path = NULL;
	char *name;
	char *name_buf;
	__va_list ap;
	int i, found;
	char *create_path = NULL;
	char *names = "pqrsPQRS";

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	if (path_fmt != NULL) {
		__va_start(ap, path_fmt);
		kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
		__va_end(ap);
	}

	parent = devfs_resolve_or_create_path(root, path, 1);
	KKASSERT(parent);

	devfs_resolve_name_path(
			((dev_name == NULL) && (dev)) ? (dev->si_name)
						      : (dev_name),
			name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
			    "DEVICE %s ALREADY EXISTS!!! "
			    "Ignoring creation request.\n", name);
		goto out;
	}

	node = devfs_allocp(Ndev, name, parent, parent->mp, dev);
	nanotime(&parent->mtime);

	/*
	 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
	 * directory
	 */
	if ((dev) && (strlen(dev->si_name) >= 4) &&
	    (!memcmp(dev->si_name, "ptm/", 4))) {
		node->parent->flags |= DEVFS_HIDDEN;
		node->flags |= DEVFS_HIDDEN;
	}

	/*
	 * Ugly pty magic, to tag pty devices as such and hide them if needed.
	 */
	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
		found = 0;
		for (i = 0; i < strlen(names); i++) {
			if (name[3] == names[i]) {
				found = 1;
				break;
			}
		}
		if (found)
			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
	}

out:
	kfree(name_buf, M_TEMP);
	kvasfree(&path);
	return node;
}

/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Ndev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}
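
/*
 * Usage sketch (illustrative): devfs_create_device_node() accepts an
 * optional printf-style directory path; the node itself is named
 * after dev->si_name when dev_name is NULL.  The "bus%d" path and
 * "unit" are hypothetical:
 *
 *	devfs_create_device_node(mnt->root_node, dev, NULL, NULL);
 *	devfs_create_device_node(mnt->root_node, dev, NULL, "bus%d", unit);
 */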

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
	struct vnode *vp = NULL;
	ino_t target = *inop;

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
	}

	return vp;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology.  The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	KKASSERT(target != NULL);
	return devfs_destroy_node(root, target->si_name);
}

/*
 * This function takes a path to a devfs node, resolves it and
 * removes the devfs node from the given topology.
 */
int
devfs_destroy_node(struct devfs_node *root, char *target)
{
	struct devfs_node *node, *parent;
	char *name;
	char *name_buf;
	char *create_path = NULL;

	KKASSERT(target);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	ksnprintf(name_buf, PATH_MAX, "%s", target);

	devfs_resolve_name_path(target, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL) {
		kfree(name_buf, M_TEMP);
		return 1;
	}

	node = devfs_find_device_node_by_name(parent, name);

	if (node) {
		nanotime(&node->parent->mtime);
		devfs_gc(node);
	}

	kfree(name_buf, M_TEMP);

	return 0;
}

/*
 * Just set perms and ownership for given node.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points.  Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL);
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}

/*
 * devfs_clone() looks up the clone handler registered for the given
 * base name and, after temporarily dropping the devfs lock for a
 * devfs_config() drain, invokes the handler to obtain a new device.
 * The new device is returned, or NULL if no handler matched or the
 * handler failed.
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) &&
		    (chandler->nhandler)) {
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			lockmgr(&devfs_lock, LK_EXCLUSIVE);

			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			if (error)
				continue;

			return ap.a_dev;
		}
	}

	return NULL;
}

/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock(&ino_lock);
	ret = d_ino++;
	spin_unlock(&ino_lock);

	return ret;
}
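
/*
 * Sketch of a clone handler as consumed by devfs_clone() above
 * (illustrative): a driver registers a d_clone_t for its basename and
 * hands back a freshly created device in ap->a_dev.  The "mydev"
 * name, "my_ops" and "unit" are hypothetical:
 *
 *	static int
 *	mydev_clone(struct dev_clone_args *ap)
 *	{
 *		ap->a_dev = make_only_dev(&my_ops, unit, UID_ROOT,
 *					  GID_WHEEL, 0600, "mydev%d", unit);
 *		return 0;
 *	}
 *
 *	devfs_clone_handler_add("mydev", mydev_clone);
 */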
/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);

	sysref_activate(&dev->si_sysref);
	reference_dev(dev);
	bzero(dev, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_uptime */
	dev->si_lastwrite = 0;		/* time_uptime */

	dev->si_dict = NULL;
	dev->si_parent = NULL;
	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_uminor = minor;
	dev->si_bops = bops;

	/*
	 * Since the disk subsystem is in the way, we need to
	 * propagate the D_CANFREE flag from bops (and ops) to
	 * si_flags.
	 */
	if (bops && (bops->head.flags & D_CANFREE)) {
		dev->si_flags |= SI_CANFREE;
	} else if (ops->head.flags & D_CANFREE) {
		dev->si_flags |= SI_CANFREE;
	}

	/* If there is a backing device, we reference its ops */
	dev->si_inode = makeudev(
		    devfs_reference_ops((bops) ? (bops) : (ops)),
		    minor);
	dev->si_umajor = umajor(dev->si_inode);

	return dev;
}

static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/* Check if it is locked already; if not, we acquire the devfs lock */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	/*
	 * Make sure the node isn't linked anymore.  Otherwise we've screwed
	 * up somewhere, since normal devs are unlinked on the call to
	 * destroy_dev and only-cdevs that have not been used for cloning
	 * are not linked in the first place.  only-cdevs used for cloning
	 * will be linked in, too, and should only be destroyed via
	 * destroy_dev, not destroy_only_dev, so we catch that problem, too.
	 */
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/* If there is a backing device, we release the backing device's ops */
	devfs_release_ops((dev->si_bops) ? (dev->si_bops) : (dev->si_ops));

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Dummies for now (individual locks for MPSAFE)
 */
static void
devfs_cdev_lock(cdev_t dev)
{
}

static void
devfs_cdev_unlock(cdev_t dev)
{
}

static int
devfs_detached_filter_eof(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_EOF | EV_NODATA);
	return (1);
}

static void
devfs_detached_filter_detach(struct knote *kn)
{
	cdev_t dev = (cdev_t)kn->kn_hook;

	knote_remove(&dev->si_kqinfo.ki_note, kn);
}

static struct filterops devfs_detached_filterops =
	{ FILTEROP_ISFD, NULL,
	  devfs_detached_filter_detach,
	  devfs_detached_filter_eof };
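/*
 * Illustrative sketch (not part of devfs): the lifecycle of a cdev
 * allocated with devfs_new_cdev() above.  example_ops is hypothetical,
 * and the release_dev() call assumes the usual reference-drop path;
 * the point is that the returned cdev is already referenced and is
 * finally torn down through devfs_cdev_terminate() once the last
 * reference goes away.
 */
#if 0
static void
example_cdev_lifecycle(void)	/* hypothetical */
{
	cdev_t dev;

	/* returns a referenced, zeroed cdev bound to example_ops */
	dev = devfs_new_cdev(&example_ops, 0, NULL);

	/* ... use dev; it is not yet linked into devfs_dev_list ... */

	/* dropping the last reference ends in devfs_cdev_terminate() */
	release_dev(dev);
}
#endif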
/*
 * Delegates knote filter handling responsibility to devfs.
 *
 * Any device that implements kqfilter event handling and could be detached
 * or shut down out from under the kevent subsystem must allow devfs to
 * assume responsibility for any knotes it may hold.
 */
void
devfs_assume_knotes(cdev_t dev, struct kqinfo *kqi)
{
	/*
	 * Let kern/kern_event.c do the heavy lifting.
	 */
	knote_assume_knotes(kqi, &dev->si_kqinfo,
			    &devfs_detached_filterops, (void *)dev);

	/*
	 * These should probably be activated individually, but doing so
	 * would require refactoring kq's public in-kernel interface.
	 */
	KNOTE(&dev->si_kqinfo.ki_note, 0);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list.  The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

/*
 * Returns non-zero if the node exists and is not hidden.
 */
int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	if (!found) {
		found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS,
				M_WAITOK);
		found->ops = ops;
		found->ref_count = 0;
		TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
	}

	KKASSERT(found);

	if (found->ref_count == 0) {
		found->id =
		    devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (found->id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = found->id;
	++found->ref_count;

	return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	KKASSERT(found);

	--found->ref_count;

	if (found->ref_count == 0) {
		TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
		kfree(found, M_DEVFS);
	}
}

/*
 * Wait for asynchronous messages to complete in the devfs helper
 * thread, then return.  Do nothing if the helper thread is dead
 * or we are being indirectly called from the helper thread itself.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	if (devfs_run && curthread != td_core) {
		msg = devfs_msg_get();
		msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
		devfs_msg_put(msg);
	}
}
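/*
 * Illustrative sketch (not part of devfs): the ops reference counting
 * above hands out one pseudo-major id per struct dev_ops.  Two devices
 * sharing the same ops therefore share the same major, and the id is
 * only returned to the ops_id bitmap when the last device using it
 * goes away.  example_ops is hypothetical.
 */
#if 0
static void
example_ops_refcounting(void)	/* hypothetical */
{
	int major1, major2;

	major1 = devfs_reference_ops(&example_ops);	/* allocates an id */
	major2 = devfs_reference_ops(&example_ops);	/* same id, ref 2 */
	KKASSERT(major1 == major2);

	devfs_release_ops(&example_ops);	/* ref_count 1, id kept */
	devfs_release_ops(&example_ops);	/* ref_count 0, id freed */
}
#endif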
/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread.  Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args);

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args);

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args);

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "devfs_msg_core");
	while (devfs_run == 0)
		lksleep(td_core, &devfs_lock, 0, "devfsc", 0);
	lockmgr(&devfs_lock, LK_RELEASE);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches.  Also removes aliases that are no longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
	while (devfs_run)
		tsleep(td_core, 0, "devfsc", hz*10);
	tsleep(td_core, 0, "devfsc", hz);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) in
 * finding the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t udev;
	cdev_t found;
	int error;

	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return (EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return (ENOENT);

	return (SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
	    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_ANYBODY,
	    NULL, 0, devfs_sysctl_devname_helper, "",
	    "helper for devname(3)");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
	   0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
	devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
	  devfs_uninit, NULL);
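/*
 * Illustrative sketch (not part of devfs): how userland can exercise
 * the kern.devname sysctl declared above, which is what devname(3)
 * builds on.  This is a hypothetical userland fragment; it assumes the
 * userland dev_t obtained from stat(2) corresponds to the kernel's
 * udev_t.
 */
#if 0
	char buf[SPECNAMELEN + 1];
	size_t len = sizeof(buf);
	udev_t udev = st.st_rdev;	/* from a stat(2) of the device */

	if (sysctlbyname("kern.devname", buf, &len, &udev,
			 sizeof(udev)) == 0)
		printf("device name: %s\n", buf);
#endif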
/*
 * wildCmp() - compare wild string to sane string
 *	       (recursive helper for devfs_WildCmp())
 *
 *	Returns 0 on success, -1 on failure.
 */
static int
wildCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s)
				return(-1);
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

/*
 * wildCaseCmp() - compare wild string to sane string, case insensitive
 *		   (recursive helper for devfs_WildCaseCmp())
 *
 *	Returns 0 on success, -1 on failure.
 */
static int
wildCaseCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s) {
#define tolower(x)	(((x) >= 'A' && (x) <= 'Z') ? ((x) + ('a' - 'A')) : (x))
				if (tolower(*w) != tolower(*s))
					return(-1);
			}
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

int
devfs_WildCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}

int
devfs_WildCaseCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCaseCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}
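/*
 * Illustrative sketch (not part of devfs): expected results of the
 * wildcard matchers above.  The return value is 0 on match and -1
 * otherwise.
 */
#if 0
	KKASSERT(devfs_WildCmp("da*", "da0") == 0);	/* '*' spans "0" */
	KKASSERT(devfs_WildCmp("da?", "da0") == 0);	/* '?' eats one char */
	KKASSERT(devfs_WildCmp("da?", "da10") == -1);	/* one char too many */
	KKASSERT(devfs_WildCaseCmp("DA*", "da0") == 0);	/* case-insensitive */
#endif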