/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/msgport.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/udev.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
#include <sys/sysref2.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DEFINE_CLONE_BITMAP(ops_id);
/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static void devfs_cdev_lock(cdev_t dev);
static void devfs_cdev_unlock(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
	.name =		"cdev",
	.mtype =	M_DEVFS,
	.proto =	SYSREF_PROTO_DEV,
	.offset =	offsetof(struct cdev, si_sysref),
	.objsize =	sizeof(struct cdev),
	.nom_cache =	32,
	.flags =	0,
	.ops = {
		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate,
		.lock = (sysref_lock_func_t)devfs_cdev_lock,
		.unlock = (sysref_unlock_func_t)devfs_cdev_unlock
	}
};

static struct objcache	*devfs_node_cache;
static struct objcache	*devfs_msg_cache;
static struct objcache	*devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
	sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
	sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
	sizeof(struct cdev), M_DEVFS };

static struct devfs_dev_head devfs_dev_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
		TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
		TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
		TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock		devfs_lock;
static struct lwkt_port devfs_dispose_port;
static struct lwkt_port devfs_msg_port;
static struct thread	*td_core;

static struct spinlock	ino_lock;
static ino_t	d_ino;
static int	devfs_debug_enable;
static int	devfs_run;

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_related_worker(cdev_t);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *, void *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
		char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_destroy_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *, int);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_related_flag_worker(cdev_t, uint32_t);
static int devfs_destroy_related_without_flag_worker(cdev_t, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);

/*
 * devfs_debug() is a SYSCTL- and TUNABLE-controlled debug output function
 * using kvprintf.
 */
int
devfs_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= devfs_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}
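/*
 * Illustrative sketch (not compiled): devfs code paths emit conditional
 * diagnostics through devfs_debug().  The level constants are the ones
 * used throughout this file; the call site below is hypothetical.
 */
#if 0
static void
example_debug_usage(cdev_t dev)
{
	/* Only printed when devfs_debug_enable >= DEVFS_DEBUG_DEBUG */
	devfs_debug(DEVFS_DEBUG_DEBUG, "working on dev %s\n", dev->si_name);

	/* Warnings use a lower level so they show up earlier */
	devfs_debug(DEVFS_DEBUG_WARNING, "unexpected state for %s\n",
		    dev->si_name);
}
#endif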
/*
 * devfs_allocp() allocates a new devfs node with the specified
 * parameters.  The node is also automatically linked into the topology
 * if a parent is specified.  It also applies the relevant rules and
 * aliases to the new node.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
	struct devfs_node *node = NULL;
	size_t namlen = strlen(name);

	node = objcache_get(devfs_node_cache, M_WAITOK);
	bzero(node, sizeof(*node));

	atomic_add_long(&DEVFS_MNTDATA(mp)->leak_count, 1);

	node->d_dev = NULL;
	node->nchildren = 1;
	node->mp = mp;
	node->d_dir.d_ino = devfs_fetch_ino();

	/*
	 * Cookie jar for children.  Leave 0 and 1 for '.' and '..' entries
	 * respectively.
	 */
	node->cookie_jar = 2;

	/*
	 * Access Control members
	 */
	node->mode = DEVFS_DEFAULT_MODE;
	node->uid = DEVFS_DEFAULT_UID;
	node->gid = DEVFS_DEFAULT_GID;

	switch (devfsnodetype) {
	case Nroot:
		/*
		 * Ensure that we don't recycle the root vnode by marking it as
		 * linked into the topology.
		 */
		node->flags |= DEVFS_NODE_LINKED;
		/* fall through */
	case Ndir:
		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
		node->d_dir.d_type = DT_DIR;
		node->nchildren = 2;
		break;

	case Nlink:
		node->d_dir.d_type = DT_LNK;
		break;

	case Nreg:
		node->d_dir.d_type = DT_REG;
		break;

	case Ndev:
		if (dev != NULL) {
			node->d_dir.d_type = DT_CHR;
			node->d_dev = dev;

			node->mode = dev->si_perms;
			node->uid = dev->si_uid;
			node->gid = dev->si_gid;

			devfs_alias_check_create(node);
		}
		break;

	default:
		panic("devfs_allocp: unknown node type");
	}

	node->v_node = NULL;
	node->node_type = devfsnodetype;

	/* Initialize the dirent structure of each devfs vnode */
	node->d_dir.d_namlen = namlen;
	node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
	memcpy(node->d_dir.d_name, name, namlen);
	node->d_dir.d_name[namlen] = '\0';

	/* Initialize the parent node element */
	node->parent = parent;

	/* Initialize *time members */
	nanotime(&node->atime);
	node->mtime = node->ctime = node->atime;

	/*
	 * Associate with parent as last step, clean out namecache
	 * reference.
	 */
	if ((parent != NULL) &&
	    ((parent->node_type == Nroot) || (parent->node_type == Ndir))) {
		parent->nchildren++;
		node->cookie = parent->cookie_jar++;
		node->flags |= DEVFS_NODE_LINKED;
		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

		/* This forces negative namecache lookups to clear */
		++mp->mnt_namecache_gen;
	}

	/*
	 * Apply rules (requires root node, skip if we are creating the
	 * root node)
	 */
	if (DEVFS_MNTDATA(mp)->root_node)
		devfs_rule_check_apply(node, NULL);

	atomic_add_long(&DEVFS_MNTDATA(mp)->file_count, 1);

	return node;
}

/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

	/*
	 * devfs master lock must not be held across a vget() call, we have
	 * to hold our ad-hoc vp to avoid a free race from destroying the
	 * contents of the structure.  The vget() will interlock recycles
	 * for us.
	 */
try_again:
	while ((vp = node->v_node) != NULL) {
		vhold(vp);
		lockmgr(&devfs_lock, LK_RELEASE);
		error = vget(vp, LK_EXCLUSIVE);
		vdrop(vp);
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		if (error == 0) {
			*vpp = vp;
			goto out;
		}
		if (error != ENOENT) {
			*vpp = NULL;
			goto out;
		}
	}

	/*
	 * devfs master lock must not be held across a getnewvnode() call.
	 */
	lockmgr(&devfs_lock, LK_RELEASE);
	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		goto out;
	}
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vp = *vpp;

	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	switch (node->node_type) {
	case Nroot:
		vsetflags(vp, VROOT);
		/* fall through */
	case Ndir:
		vp->v_type = VDIR;
		break;

	case Nlink:
		vp->v_type = VLNK;
		break;

	case Nreg:
		vp->v_type = VREG;
		break;

	case Ndev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = node->d_dev->si_umajor;

		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp() allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
	      char *name, struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}
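/*
 * Illustrative sketch (not compiled): a typical consumer allocates a node
 * and vnode in one step with devfs_allocvp().  The Nreg node type, the
 * mount point and the parent below are assumptions; devfs_lock must be
 * held exclusively, as it is for the other allocation paths in this file.
 */
#if 0
static int
example_make_regular_node(struct mount *mp, struct devfs_node *parent)
{
	struct vnode *vp;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(mp, &vp, Nreg, "example", parent, NULL);
	lockmgr(&devfs_lock, LK_RELEASE);
	if (vp == NULL)
		return ENOMEM;
	vput(vp);		/* drop the ref/lock devfs_allocv returned */
	return 0;
}
#endif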
/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 *
 * The core lock is not necessarily held on call and, if it is held, must
 * be temporarily released to avoid a deadlock.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;
	int relock;

	KKASSERT(node);
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Nroot));

	/*
	 * Protect against double frees
	 */
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
	node->flags |= DEVFS_DESTROYED;

	/*
	 * Avoid deadlocks between devfs_lock and the vnode lock when
	 * disassociating the vnode (stress2 pty vs ls -la /dev/pts).
	 *
	 * This also prevents the vnode reclaim code from double-freeing
	 * the node.  The vget() is required to safely modify the vp
	 * and cycle the refs to terminate an inactive vp.
	 */
	if (lockstatus(&devfs_lock, curthread) == LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_RELEASE);
		relock = 1;
	} else {
		relock = 0;
	}

	while ((vp = node->v_node) != NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
			break;
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
		vput(vp);
	}

	/*
	 * Remaining cleanup
	 */
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->leak_count, 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	if (node->d_dir.d_name) {
		kfree(node->d_dir.d_name, M_DEVFS);
		node->d_dir.d_name = NULL;
	}
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->file_count, 1);
	objcache_put(devfs_node_cache, node);

	if (relock)
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

	return 0;
}

/*
 * Returns a valid vp associated with the devfs alias node or NULL
 */
static void *devfs_alias_getvp(struct devfs_node *node)
{
	struct devfs_node *found = node;
	int depth = 0;

	while ((found->node_type == Nlink) && (found->link_target)) {
		if (depth >= 8) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Recursive link or depth >= 8");
			break;
		}

		found = found->link_target;
		++depth;
	}

	return found->v_node;
}
/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct vnode *vp;
	struct devfs_node *parent;
	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere
	 * and we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		node->flags &= ~DEVFS_NODE_LINKED;
	}

	node->parent = NULL;

	/*
	 * Namecache invalidation.
	 * devfs alias nodes are special: their v_node entry is always null
	 * and they use the one from their link target.
	 * We thus use the target node's vp to invalidate both alias and target
	 * entries in the namecache.
	 * Doing so for the target is not necessary but it would be more
	 * expensive to resolve only the namecache entry of the alias node
	 * from the information available in this function.
	 */
	if (node->node_type == Nlink)
		vp = devfs_alias_getvp(node);
	else
		vp = node->v_node;

	if (vp != NULL)
		cache_inval_vp(vp, CINV_DESTROY);

	return 0;
}

void *
devfs_iterate_topology(struct devfs_node *node,
		devfs_iterate_callback_t *callback, void *arg1)
{
	struct devfs_node *node1, *node2;
	void *ret = NULL;

	if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				if ((ret = devfs_iterate_topology(node1,
						callback, arg1)))
					return ret;
			}
		}
	}

	ret = callback(node, arg1);
	return ret;
}
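/*
 * Illustrative sketch (not compiled): callbacks passed to
 * devfs_iterate_topology() return NULL to keep iterating, or non-NULL to
 * abort the traversal and propagate that value back to the caller.  The
 * node-counting callback below is hypothetical.
 */
#if 0
static void *
example_count_callback(struct devfs_node *node, void *arg)
{
	long *counterp = arg;

	++(*counterp);
	return NULL;		/* NULL means: keep walking */
}

/* usage: devfs_iterate_topology(root_node, example_count_callback, &count); */
#endif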
static void *
devfs_alias_reaper_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Nlink) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}

/*
 * devfs_reaperp() is a recursive function that iterates through the whole
 * topology, unlinking and freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
	devfs_unlinkp(node);
	devfs_freep(node);

	return NULL;
}

static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Ndir) {
		if ((node->nchildren == 2) &&
		    !(node->flags & DEVFS_USER_CREATED)) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return NULL;
}

static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
	if ((node->node_type == Nlink) && (node->link_target == target)) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}

/*
 * devfs_gc() is the devfs garbage collector.  It takes care of unlinking
 * and freeing a node, but also removes empty directories and links that
 * refer, via the devfs auto-link mechanism, to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	if (node->nlinks > 0)
		devfs_iterate_topology(root_node,
			(devfs_iterate_callback_t *)devfs_gc_links_callback,
			node);

	devfs_unlinkp(node);
	devfs_iterate_topology(root_node,
		(devfs_iterate_callback_t *)devfs_gc_dirs_callback, NULL);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
	return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_destroy_related() is the synchronous entry point for device
 * destruction by subname.  It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_destroy_related(cdev_t dev)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = dev;
	devfs_msg_send_sync(DEVFS_DESTROY_RELATED, msg);
	devfs_msg_put(msg);
	return 0;
}

int
devfs_clr_related_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	devfs_msg_send_sync(DEVFS_CLR_RELATED_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

int
devfs_destroy_related_without_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	devfs_msg_send_sync(DEVFS_DESTROY_RELATED_WO_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_create_all_dev is the asynchronous entry point to trigger device
 * node creation.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
	return 0;
}

/*
 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
 * devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
	return 0;
}

/*
 * devfs_clone_handler_add is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = NULL;
	devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}
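/*
 * Illustrative sketch (not compiled): a cloning driver registers its
 * basename with devfs_clone_handler_add() and hands out fresh devices
 * from its d_clone_t handler.  The handler internals (example_ops,
 * next_minor, the make_only_dev() call) are hypothetical; only the
 * registration entry points are from this file.
 */
#if 0
static int
example_clone(struct dev_clone_args *ap)
{
	/* allocate a fresh minor and hand the new device back */
	ap->a_dev = make_only_dev(&example_ops, next_minor++, UID_ROOT,
				  GID_WHEEL, 0600, "example%d", next_minor);
	return 0;
}

static void
example_init(void)
{
	devfs_clone_handler_add("example", example_clone);
}

static void
example_uninit(void)
{
	devfs_clone_handler_del("example");
}
#endif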
/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char *target;
	__va_list ap;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);
	kvasfree(&target);

	return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
 */
cdev_t
devfs_find_device_by_udev(udev_t udev)
{
	cdev_t found = NULL;
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_udev = udev;
	devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_find_device_by_udev found? %s -end:3-\n",
		    ((found) ? found->si_name : "NO"));
	return found;
}
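/*
 * Illustrative sketch (not compiled): the by-name lookup takes a format
 * string, so callers can compose device names on the fly.  The device
 * name used here is hypothetical.
 */
#if 0
static cdev_t
example_lookup(int unit)
{
	cdev_t dev;

	dev = devfs_find_device_by_name("ttyv%d", unit);
	if (dev == NULL)
		devfs_debug(DEVFS_DEBUG_WARNING, "ttyv%d not found\n", unit);
	return dev;
}
#endif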
struct vnode *
devfs_inode_to_vnode(struct mount *mp, ino_t target)
{
	struct vnode *vp = NULL;
	devfs_msg_t msg;

	if (mp == NULL)
		return NULL;

	msg = devfs_msg_get();
	msg->mdv_ino.mp = mp;
	msg->mdv_ino.ino = target;
	devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg);
	vp = msg->mdv_ino.vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	devfs_msg_put(msg);

	return vp;
}

/*
 * devfs_make_alias is the asynchronous entry point to register an alias
 * for a device.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_make_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias);
	return 0;
}

/*
 * devfs_destroy_alias is the asynchronous entry point to deregister an
 * alias for a device.  It just sends a message with the relevant details
 * to the devfs core.
 */
int
devfs_destroy_alias(const char *name, cdev_t dev_target)
{
	struct devfs_alias *alias;
	size_t len;

	len = strlen(name);

	alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK);
	alias->name = kstrdup(name, M_DEVFS);
	alias->namlen = len;
	alias->dev_target = dev_target;

	devfs_msg_send_generic(DEVFS_DESTROY_ALIAS, alias);
	return 0;
}

/*
 * devfs_apply_rules is the asynchronous entry point to trigger application
 * of all rules.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_apply_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_APPLY_RULES, new_name);

	return 0;
}

/*
 * devfs_reset_rules is the asynchronous entry point to trigger a reset of
 * all rules.  It just sends a message with the relevant details to the
 * devfs core.
 */
int
devfs_reset_rules(char *mntto)
{
	char *new_name;

	new_name = kstrdup(mntto, M_DEVFS);
	devfs_msg_send_name(DEVFS_RESET_RULES, new_name);

	return 0;
}


/*
 * devfs_scan_callback is the synchronous entry point to call a callback
 * on all cdevs; it sends a synchronous message with the relevant details
 * to the devfs core.
 */
int
devfs_scan_callback(devfs_scan_t *callback, void *arg)
{
	devfs_msg_t msg;

	KKASSERT(callback);

	msg = devfs_msg_get();
	msg->mdv_load = callback;
	msg->mdv_load2 = arg;
	devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg);
	devfs_msg_put(msg);

	return 0;
}
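/*
 * Illustrative sketch (not compiled): a devfs_scan_t callback is invoked
 * once per cdev and once per alias, with the alias flag distinguishing
 * the two (see devfs_scan_callback_worker() further below).  The argument
 * types here are inferred from that worker and may be simplified.
 */
#if 0
static void
example_scan(char *name, cdev_t dev, bool is_alias, void *arg)
{
	int *count = arg;

	if (!is_alias)
		++(*count);	/* count real cdevs only */
}

/* usage: int n = 0; devfs_scan_callback(example_scan, &n); */
#endif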
/*
 * Acts as a message drain.  Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	devfs_msg_put((devfs_msg_t)msg);
}

/*
 * devfs_msg_get allocates a new devfs msg and returns it.
 */
devfs_msg_t
devfs_msg_get(void)
{
	return objcache_get(devfs_msg_cache, M_WAITOK);
}

/*
 * devfs_msg_put deallocates a given devfs msg.
 */
int
devfs_msg_put(devfs_msg_t msg)
{
	objcache_put(devfs_msg_cache, msg);
	return 0;
}

/*
 * devfs_msg_send is the generic asynchronous message sending facility
 * for devfs.  By default the reply port is the automatic disposal port.
 *
 * If the current thread is the devfs_msg_port thread we execute the
 * operation synchronously.
 */
void
devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg)
{
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	if (port->mpu_td == curthread) {
		devfs_msg_exec(devfs_msg);
		lwkt_replymsg(&devfs_msg->hdr, 0);
	} else {
		lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg);
	}
}

/*
 * devfs_msg_send_sync is the generic synchronous message sending
 * facility for devfs.  It initializes a local reply port and waits
 * for the core's answer.  The core will write the answer on the same
 * message which is sent back as reply.  The caller still has a reference
 * to the message, so we don't need to return it.
 */
int
devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg)
{
	struct lwkt_port rep_port;
	int	error;
	lwkt_port_t port = &devfs_msg_port;

	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0);

	devfs_msg->hdr.u.ms_result = cmd;

	error = lwkt_domsg(port, (lwkt_msg_t)devfs_msg, 0);

	return error;
}
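/*
 * Illustrative sketch (not compiled): new synchronous entry points all
 * follow the same get/fill/send/put pattern shown below.  The message
 * command and the payload are hypothetical.
 */
#if 0
int
devfs_example_sync_op(void *payload)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = payload;
	devfs_msg_send_sync(DEVFS_EXAMPLE_OP, msg);	/* hypothetical cmd */
	/* the core has filled in any answer fields by now */
	devfs_msg_put(msg);
	return 0;
}
#endif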
/*
 * Sends a message with a generic argument.
 */
void
devfs_msg_send_generic(uint32_t cmd, void *load)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_load = load;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a name argument.
 */
void
devfs_msg_send_name(uint32_t cmd, char *name)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_name = name;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a mount argument.
 */
void
devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_mnt = mnt;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with an ops argument.
 */
void
devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_ops.ops = ops;
	devfs_msg->mdv_ops.minor = minor;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a clone handler argument.
 */
void
devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_chandler.name = name;
	devfs_msg->mdv_chandler.nhandler = handler;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a device argument.
 */
void
devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_dev.dev = dev;
	devfs_msg->mdv_dev.uid = uid;
	devfs_msg->mdv_dev.gid = gid;
	devfs_msg->mdv_dev.perms = perms;

	devfs_msg_send(cmd, devfs_msg);
}

/*
 * Sends a message with a link argument.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_link.name = name;
	devfs_msg->mdv_link.target = target;
	devfs_msg->mdv_link.mp = mp;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * devfs_msg_core is the main devfs thread.  It handles all incoming
 * messages and calls the relevant worker functions.  By using messages
 * it is ensured that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	lwkt_initport_thread(&devfs_msg_port, curthread);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_run = 1;
	wakeup(td_core);
	lockmgr(&devfs_lock, LK_RELEASE);

	get_mplock();	/* mpsafe yet? */

	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_msg_core, new msg: %x\n",
			    (unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}

	rel_mplock();
	wakeup(td_core);

	lwkt_exit();
}

static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_RELATED:
		devfs_destroy_related_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		/* Be sure to remove all the aliases first */
		devfs_iterate_topology(mnt->root_node,
				       devfs_alias_reaper_callback, NULL);
		devfs_iterate_topology(mnt->root_node,
				       devfs_reaperp_callback, NULL);
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %ld devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
					  msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_DESTROY_ALIAS:
		devfs_destroy_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load,
					   msg->mdv_load2);
		break;
	case DEVFS_CLR_RELATED_FLAG:
		devfs_clr_related_flag_worker(msg->mdv_flags.dev,
					      msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_RELATED_WO_FLAG:
		devfs_destroy_related_without_flag_worker(msg->mdv_flags.dev,
							  msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_iterate_topology(
			DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
			(devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
			&msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}
static void
devfs_devctl_notify(cdev_t dev, const char *ev)
{
	static const char prefix[] = "cdev=";
	char *data;
	int namelen;

	namelen = strlen(dev->si_name);
	data = kmalloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
	memcpy(data, prefix, sizeof(prefix) - 1);
	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
	devctl_notify("DEVFS", "CDEV", ev, data);
	kfree(data, M_TEMP);
}
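/*
 * Illustrative note: for a device named, say, "ttyv0" the notification
 * data composed above is the string "cdev=ttyv0", delivered to devctl
 * consumers under system "DEVFS", subsystem "CDEV", with "CREATE" or
 * "DESTROY" as the event type (see the workers below).
 */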
/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions.  It also calls devfs_propagate_dev which in turn propagates
 * the change to all mount points.
 *
 * The passed dev is already referenced.  This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	udev_event_attach(dev, NULL, 0);
	devfs_devctl_notify(dev, "CREATE");

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);

	devfs_devctl_notify(dev, "DESTROY");
	udev_event_detach(dev, NULL, 0);

	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function to destroy all devices with a certain basename.
 * Calls devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_related_worker(cdev_t needle)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related worker: %s\n",
		    needle->si_name);
	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_worker(dev);
			devfs_destroy_dev_worker(dev);
			goto restart;
		}
	}
	return 0;
}

static int
devfs_clr_related_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev, dev1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_parent == needle) {
			devfs_clr_related_flag_worker(dev, flag);
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

static int
devfs_destroy_related_without_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related_wo_flag: %s\n",
		    needle->si_name);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_without_flag_worker(dev, flag);
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "related_wo_flag: %s restart\n",
					    dev->si_name);
				goto restart;
			}
		}
	}

	return 0;
}

/*
 * Worker function that creates all device nodes on top of a devfs
 * root node.
 */
static int
devfs_create_all_dev_worker(struct devfs_node *root)
{
	cdev_t dev;

	KKASSERT(root);

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		devfs_create_device_node(root, dev, NULL, NULL, NULL);
	}

	return 0;
}

/*
 * Worker function that destroys all devices that match a specific
 * dev_ops and/or minor.  If minor is less than 0, it is not matched
 * against.  It also propagates all changes.
 */
static int
devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor)
{
	cdev_t dev, dev1;

	KKASSERT(ops);

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_ops != ops)
			continue;
		if ((minor < 0) || (dev->si_uminor == minor)) {
			devfs_destroy_dev_worker(dev);
		}
	}

	return 0;
}

/*
 * Worker function that registers a new clone handler in devfs.
 */
static int
devfs_chandler_add_worker(const char *name, d_clone_t *nhandler)
{
	struct devfs_clone_handler *chandler = NULL;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;

		if (!memcmp(chandler->name, name, len)) {
			/* Clonable basename already exists */
			return 1;
		}
	}

	chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO);
	chandler->name = kstrdup(name, M_DEVFS);
	chandler->namlen = len;
	chandler->nhandler = nhandler;

	TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link);
	return 0;
}
/*
 * Worker function that removes a given clone handler from the
 * clone handler list.
 */
static int
devfs_chandler_del_worker(const char *name)
{
	struct devfs_clone_handler *chandler, *chandler2;
	u_char len = strlen(name);

	if (len == 0)
		return 1;

	TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) {
		if (chandler->namlen != len)
			continue;
		if (memcmp(chandler->name, name, len))
			continue;

		TAILQ_REMOVE(&devfs_chandler_list, chandler, link);
		kfree(chandler->name, M_DEVFS);
		kfree(chandler, M_DEVFS);
		break;
	}

	return 0;
}

/*
 * Worker function that looks up a device by name and stores the result
 * in the message, so the answer reaches the caller when the message is
 * replied to.
 */
static int
devfs_find_device_by_name_worker(devfs_msg_t devfs_msg)
{
	struct devfs_alias *alias;
	cdev_t dev;
	cdev_t found = NULL;

	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) {
			found = dev;
			break;
		}
	}
	if (found == NULL) {
		TAILQ_FOREACH(alias, &devfs_alias_list, link) {
			if (strcmp(devfs_msg->mdv_name, alias->name) == 0) {
				found = alias->dev_target;
				break;
			}
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that looks up a device by udev number and stores the
 * result in the message, so the answer reaches the caller when the
 * message is replied to.
 */
static int
devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg)
{
	cdev_t dev, dev1;
	cdev_t found = NULL;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) {
			found = dev;
			break;
		}
	}
	devfs_msg->mdv_cdev = found;

	return 0;
}

/*
 * Worker function that inserts a given alias into the
 * alias list, and propagates the alias to all mount
 * points.
 */
static int
devfs_make_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	size_t len = strlen(alias->name);
	int found = 0;

	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (len != alias2->namlen)
			continue;

		if (!memcmp(alias->name, alias2->name, len)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		/*
		 * The alias doesn't exist yet, so we add it to the alias list
		 */
		TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link);
		devfs_alias_propagate(alias, 0);
		udev_event_attach(alias->dev_target, alias->name, 1);
	} else {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Warning: duplicate devfs_make_alias for %s\n",
			    alias->name);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}

	return 0;
}
/*
 * Worker function that deletes a given alias from the
 * alias list, and propagates the removal to all mount
 * points.
 */
static int
devfs_destroy_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	int found = 0;

	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (alias->dev_target != alias2->dev_target)
			continue;

		if (devfs_WildCmp(alias->name, alias2->name) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Warning: devfs_destroy_alias for nonexistent alias: %s\n",
			    alias->name);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	} else {
		/*
		 * The alias exists, so we delete it from the alias list
		 */
		TAILQ_REMOVE(&devfs_alias_list, alias2, link);
		devfs_alias_propagate(alias2, 1);
		udev_event_detach(alias2->dev_target, alias2->name, 1);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
		kfree(alias2->name, M_DEVFS);
		kfree(alias2, M_DEVFS);
	}

	return 0;
}

/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		TAILQ_REMOVE(&devfs_alias_list, alias, link);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}
	return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
 */
static int
devfs_alias_remove(cdev_t dev)
{
	struct devfs_alias *alias, *alias2;

	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		if (alias->dev_target == dev) {
			TAILQ_REMOVE(&devfs_alias_list, alias, link);
			udev_event_detach(alias->dev_target, alias->name, 1);
			kfree(alias->name, M_DEVFS);
			kfree(alias, M_DEVFS);
		}
	}
	return 0;
}

/*
 * This function propagates an alias addition or removal to
 * all mount points.
 */
static int
devfs_alias_propagate(struct devfs_alias *alias, int remove)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (remove) {
			devfs_destroy_node(mnt->root_node, alias->name);
		} else {
			devfs_alias_apply(mnt->root_node, alias);
		}
	}
	return 0;
}

/*
 * This function is a recursive function iterating through
 * all device nodes in the topology and, if applicable,
 * creating the relevant alias for a device node.
 */
static int
devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias)
{
	struct devfs_node *node1, *node2;

	KKASSERT(alias != NULL);

	if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_alias_apply(node1, alias);
			}
		}
	} else {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}
/*
 * This function checks if any alias is applicable
 * to the given node.  If so, the alias is created.
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
	struct devfs_alias *alias;

	TAILQ_FOREACH(alias, &devfs_alias_list, link) {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}

/*
 * This function creates an alias with a given name
 * linking to a given devfs node.  It also increments
 * the link count on the target node.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	char *create_path = NULL;
	char *name;
	char *name_buf;
	int result = 0;

	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		result = 1;
		goto done;
	}

	linknode = devfs_allocp(Nlink, name, parent, mp, NULL);
	if (linknode == NULL) {
		result = 1;
		goto done;
	}

	linknode->link_target = target;
	target->nlinks++;

	if (rule_based)
		linknode->flags |= DEVFS_RULE_CREATED;

done:
	kfree(name_buf, M_TEMP);
	return (result);
}

/*
 * This function is called by the core and handles mount point
 * strings.  It applies or resets the rules either on all mount
 * points or only on a specific one, matched by mount path.
 */
static int
devfs_apply_reset_rules_caller(char *mountto, int apply)
{
	struct devfs_mnt_data *mnt;

	if (mountto[0] == '*') {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			devfs_iterate_topology(mnt->root_node,
				(apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
				NULL);
		}
	} else {
		TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
			if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) {
				devfs_iterate_topology(mnt->root_node,
					(apply)?(devfs_rule_check_apply):(devfs_rule_reset_node),
					NULL);
				break;
			}
		}
	}

	kfree(mountto, M_DEVFS);
	return 0;
}

/*
 * This function calls a given callback function for
 * every dev node in the devfs dev list.
 */
static int
devfs_scan_callback_worker(devfs_scan_t *callback, void *arg)
{
	cdev_t dev, dev1;
	struct devfs_alias *alias, *alias1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		callback(dev->si_name, dev, false, arg);
	}
	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias1) {
		callback(alias->name, alias->dev_target, true, arg);
	}

	return 0;
}
/*
 * This function tries to resolve a given directory, or if not
 * found and creation requested, creates the given directory.
 */
static struct devfs_node *
devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name,
			    size_t name_len, int create)
{
	struct devfs_node *node, *found = NULL;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (name_len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(dir_name, node->d_dir.d_name, name_len)) {
			found = node;
			break;
		}
	}

	if ((found == NULL) && (create)) {
		found = devfs_allocp(Ndir, dir_name, parent, parent->mp, NULL);
	}

	return found;
}

/*
 * This function tries to resolve a complete path.  If creation is
 * requested, any component of the path that cannot be resolved (because
 * it doesn't exist) is created.
 */
struct devfs_node *
devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create)
{
	struct devfs_node *node = parent;
	char *buf;
	size_t idx = 0;

	if (path == NULL)
		return parent;

	buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	while (*path && idx < PATH_MAX - 1) {
		if (*path != '/') {
			buf[idx++] = *path;
		} else {
			buf[idx] = '\0';
			node = devfs_resolve_or_create_dir(node, buf, idx, create);
			if (node == NULL) {
				kfree(buf, M_TEMP);
				return NULL;
			}
			idx = 0;
		}
		++path;
	}
	buf[idx] = '\0';
	node = devfs_resolve_or_create_dir(node, buf, idx, create);
	kfree(buf, M_TEMP);
	return (node);
}

/*
 * Takes a full path and strips it into a directory path and a name.
 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp.  It
 * requires a working buffer large enough to hold the whole fullpath.
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
	char *name = NULL;
	char *path = NULL;
	size_t len = strlen(fullpath) + 1;
	int i;

	KKASSERT((fullpath != NULL) && (buf != NULL));
	KKASSERT((pathp != NULL) && (namep != NULL));

	memcpy(buf, fullpath, len);

	for (i = len-1; i >= 0; i--) {
		if (buf[i] == '/') {
			buf[i] = '\0';
			name = &(buf[i+1]);
			path = buf;
			break;
		}
	}

	*pathp = path;

	if (name) {
		*namep = name;
	} else {
		*namep = buf;
	}

	return 0;
}
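/*
 * Illustrative sketch (not compiled): splitting "a/b/c/foo" with
 * devfs_resolve_name_path() yields path "a/b/c" and name "foo"; a name
 * without any '/' comes back with *pathp == NULL and *namep pointing at
 * the whole buffer.
 */
#if 0
static void
example_split(void)
{
	char fullpath[] = "a/b/c/foo";
	char buf[PATH_MAX];
	char *path, *name;

	devfs_resolve_name_path(fullpath, buf, &path, &name);
	/* now path == "a/b/c" and name == "foo", both inside buf */
}
#endif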
/*
 * This function creates a new devfs node for a given device.  It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
			 int *existsp, char *dev_name, char *path_fmt, ...)
{
	struct devfs_node *parent, *node = NULL;
	char *path = NULL;
	char *name;
	char *name_buf;
	__va_list ap;
	int i, found;
	char *create_path = NULL;
	char *names = "pqrsPQRS";

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	if (existsp)
		*existsp = 0;

	if (path_fmt != NULL) {
		__va_start(ap, path_fmt);
		kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
		__va_end(ap);
	}

	parent = devfs_resolve_or_create_path(root, path, 1);
	KKASSERT(parent);

	devfs_resolve_name_path(
			((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
			name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);

	node = devfs_find_device_node_by_name(parent, name);
	if (node) {
		if (node->d_dev == dev) {
			/*
			 * Allow case where device caches dev after the
			 * close and might desire to reuse it.
			 */
			if (existsp)
				*existsp = 1;
		} else {
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_create_device_node: "
				    "DEVICE %s ALREADY EXISTS!!! "
				    "Ignoring creation request.\n",
				    name);
			node = NULL;
		}
		goto out;
	}

	node = devfs_allocp(Ndev, name, parent, parent->mp, dev);
	nanotime(&parent->mtime);

	/*
	 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
	 * directory
	 */
	if ((dev) && (strlen(dev->si_name) >= 4) &&
	    (!memcmp(dev->si_name, "ptm/", 4))) {
		node->parent->flags |= DEVFS_HIDDEN;
		node->flags |= DEVFS_HIDDEN;
	}

	/*
	 * Ugly pty magic, to tag pty devices as such and hide them if needed.
	 */
	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
		found = 0;
		for (i = 0; i < strlen(names); i++) {
			if (name[3] == names[i]) {
				found = 1;
				break;
			}
		}
		if (found)
			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
	}

out:
	kfree(name_buf, M_TEMP);
	kvasfree(&path);
	return node;
}

/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Ndev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}
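/*
 * Illustrative sketch (not compiled): devfs_find_device_node_callback()
 * is meant to be driven through devfs_iterate_topology(), which stops at
 * the first non-NULL return.  The root and dev arguments below are
 * hypothetical.
 */
#if 0
static struct devfs_node *
example_find_node(struct devfs_node *root, cdev_t dev)
{
	return devfs_iterate_topology(root,
	    (devfs_iterate_callback_t *)devfs_find_device_node_callback, dev);
}
#endif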
/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
 */
struct devfs_node *
devfs_find_device_node_by_name(struct devfs_node *parent, char *target)
{
	struct devfs_node *node, *found = NULL;
	size_t len = strlen(target);

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) {
		if (len != node->d_dir.d_namlen)
			continue;

		if (!memcmp(node->d_dir.d_name, target, len)) {
			found = node;
			break;
		}
	}

	return found;
}

static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop)
{
	struct vnode *vp = NULL;
	ino_t target = *inop;

	if (node->d_dir.d_ino == target) {
		if (node->v_node) {
			vp = node->v_node;
			vget(vp, LK_EXCLUSIVE | LK_RETRY);
			vn_unlock(vp);
		} else {
			devfs_allocv(&vp, node);
			vn_unlock(vp);
		}
	}

	return vp;
}

/*
 * This function takes a cdev and removes its devfs node in the
 * given topology.  The cdev remains intact.
 */
int
devfs_destroy_device_node(struct devfs_node *root, cdev_t target)
{
	KKASSERT(target != NULL);
	return devfs_destroy_node(root, target->si_name);
}

/*
 * This function takes a path to a devfs node, resolves it and
 * removes the devfs node from the given topology.
 */
int
devfs_destroy_node(struct devfs_node *root, char *target)
{
	struct devfs_node *node, *parent;
	char *name;
	char *name_buf;
	char *create_path = NULL;

	KKASSERT(target);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	ksnprintf(name_buf, PATH_MAX, "%s", target);

	devfs_resolve_name_path(target, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(root, create_path, 0);
	else
		parent = root;

	if (parent == NULL) {
		kfree(name_buf, M_TEMP);
		return 1;
	}

	node = devfs_find_device_node_by_name(parent, name);

	if (node) {
		nanotime(&node->parent->mtime);
		devfs_gc(node);
	}

	kfree(name_buf, M_TEMP);

	return 0;
}

/*
 * Just set perms and ownership for given node.
 */
int
devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid,
		u_short mode, u_long flags)
{
	node->mode = mode;
	node->uid = uid;
	node->gid = gid;

	return 0;
}

/*
 * Propagates a device attach/detach to all mount
 * points.  Also takes care of automatic alias removal
 * for a deleted cdev.
 */
static int
devfs_propagate_dev(cdev_t dev, int attach)
{
	struct devfs_mnt_data *mnt;

	TAILQ_FOREACH(mnt, &devfs_mnt_list, link) {
		if (attach) {
			/* Device is being attached */
			devfs_create_device_node(mnt->root_node, dev,
						 NULL, NULL, NULL);
		} else {
			/* Device is being detached */
			devfs_alias_remove(dev);
			devfs_destroy_device_node(mnt->root_node, dev);
		}
	}
	return 0;
}
/*
 * devfs_clone() searches the registered clone handler list for a handler
 * whose name matches the given name.  If one is found, the devfs lock is
 * dropped around a call into the handler, which creates a new device that
 * is returned to the caller.  NULL is returned if no handler matches or
 * the handler fails.
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) &&
		    (chandler->nhandler)) {
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			lockmgr(&devfs_lock, LK_EXCLUSIVE);

			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			if (error)
				continue;

			return ap.a_dev;
		}
	}

	return NULL;
}

/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees each orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch a new ino_t by incrementing the global d_ino counter
 * while holding its spinlock.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock(&ino_lock);
	ret = d_ino++;
	spin_unlock(&ino_lock);

	return ret;
}

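/*
 * Illustrative sketch (not compiled in): devfs_new_cdev() below is the
 * low-level allocator behind the make_dev() family.  A typical driver
 * never calls it directly; it calls make_dev() at attach time, which
 * allocates the cdev and propagates node creation to all devfs mounts.
 * The softc layout and names here are hypothetical.
 */
#if 0
static int
mydev_attach(device_t dv)
{
	struct mydev_softc *sc = device_get_softc(dv);

	sc->sc_dev = make_dev(&mydev_ops, device_get_unit(dv),
			      UID_ROOT, GID_WHEEL, 0600,
			      "mydev%d", device_get_unit(dv));
	return 0;
}
#endif
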
/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);

	sysref_activate(&dev->si_sysref);
	reference_dev(dev);
	bzero(dev, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_uptime */
	dev->si_lastwrite = 0;		/* time_uptime */

	dev->si_dict = NULL;
	dev->si_parent = NULL;
	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_uminor = minor;
	dev->si_bops = bops;

	/*
	 * Since the disk subsystem is in the way, we need to
	 * propagate the D_CANFREE flag from bops (and ops) to
	 * si_flags.
	 */
	if (bops && (bops->head.flags & D_CANFREE)) {
		dev->si_flags |= SI_CANFREE;
	} else if (ops->head.flags & D_CANFREE) {
		dev->si_flags |= SI_CANFREE;
	}

	/* If there is a backing device, we reference its ops instead */
	dev->si_inode = makeudev(
		    devfs_reference_ops((bops)?(bops):(ops)),
		    minor);
	dev->si_umajor = umajor(dev->si_inode);

	return dev;
}

static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/* Check if it is locked already; if not, we acquire the devfs lock */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	/*
	 * Make sure the node isn't linked anymore.  Otherwise we've screwed
	 * up somewhere, since normal devs are unlinked on the call to
	 * destroy_dev and only-cdevs that have not been used for cloning
	 * are not linked in the first place.  only-cdevs used for cloning
	 * will be linked in, too, and should only be destroyed via
	 * destroy_dev, not destroy_only_dev, so we catch that problem, too.
	 */
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/* If there is a backing device, release the backing device's ops */
	devfs_release_ops((dev->si_bops)?(dev->si_bops):(dev->si_ops));

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Dummies for now (individual locks for MPSAFE)
 */
static void
devfs_cdev_lock(cdev_t dev)
{
}

static void
devfs_cdev_unlock(cdev_t dev)
{
}

static int
devfs_detached_filter_eof(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_EOF | EV_NODATA);
	return (1);
}

static void
devfs_detached_filter_detach(struct knote *kn)
{
	cdev_t dev = (cdev_t)kn->kn_hook;

	knote_remove(&dev->si_kqinfo.ki_note, kn);
}

static struct filterops devfs_detached_filterops =
	{ FILTEROP_ISFD, NULL,
	  devfs_detached_filter_detach,
	  devfs_detached_filter_eof };

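/*
 * Illustrative sketch (not compiled in): a driver whose device can be
 * yanked while userland still holds kevent registrations hands its
 * knotes over to devfs from its detach path, using devfs_assume_knotes()
 * below.  The softc fields here are hypothetical.
 */
#if 0
static int
mydev_detach(device_t dv)
{
	struct mydev_softc *sc = device_get_softc(dv);

	/* After this, pending knotes use the detached filterops above. */
	devfs_assume_knotes(sc->sc_dev, &sc->sc_kqinfo);
	destroy_dev(sc->sc_dev);
	return 0;
}
#endif
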
/*
 * Delegates knote filter handling responsibility to devfs.
 *
 * Any device that implements kqfilter event handling and could be detached
 * or shut down out from under the kevent subsystem must allow devfs to
 * assume responsibility for any knotes it may hold.
 */
void
devfs_assume_knotes(cdev_t dev, struct kqinfo *kqi)
{
	/*
	 * Let kern/kern_event.c do the heavy lifting.
	 */
	knote_assume_knotes(kqi, &dev->si_kqinfo,
			    &devfs_detached_filterops, (void *)dev);

	/*
	 * These should probably be activated individually, but doing so
	 * would require refactoring kq's public in-kernel interface.
	 */
	KNOTE(&dev->si_kqinfo.ki_note, 0);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list.  The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	if (!found) {
		found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS,
				M_WAITOK);
		found->ops = ops;
		found->ref_count = 0;
		TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
	}

	KKASSERT(found);

	if (found->ref_count == 0) {
		found->id =
		    devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (found->id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = found->id;
	++found->ref_count;

	return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	KKASSERT(found);

	--found->ref_count;

	if (found->ref_count == 0) {
		TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
		kfree(found, M_DEVFS);
	}
}

/*
 * Wait for asynchronous messages to complete in the devfs helper
 * thread, then return.  Do nothing if the helper thread is dead
 * or we are being indirectly called from the helper thread itself.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	if (devfs_run && curthread != td_core) {
		msg = devfs_msg_get();
		devfs_msg_send_sync(DEVFS_SYNC, msg);
		devfs_msg_put(msg);
	}
}

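/*
 * Illustrative sketch (not compiled in): devfs_config() above acts as a
 * barrier against the devfs helper thread.  A caller that has queued
 * asynchronous work and must observe its completion can synchronize as
 * shown; the lock handling mirrors the pattern used in devfs_clone()
 * earlier in this file.
 */
#if 0
	/* ...with devfs_lock held exclusively... */
	lockmgr(&devfs_lock, LK_RELEASE);
	devfs_config();			/* drain the helper thread */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
#endif
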
/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread.  Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args);

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args);

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args);

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "devfs_msg_core");
	while (devfs_run == 0)
		lksleep(td_core, &devfs_lock, 0, "devfsc", 0);
	lockmgr(&devfs_lock, LK_RELEASE);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * thread and the objcaches.  Also removes aliases that are no
 * longer needed.
 */
static void
devfs_uninit(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n");

	devfs_msg_send(DEVFS_TERMINATE_CORE, NULL);
	while (devfs_run)
		tsleep(td_core, 0, "devfsc", hz*10);
	tsleep(td_core, 0, "devfsc", hz);

	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id));

	/* Destroy the objcaches */
	objcache_destroy(devfs_msg_cache);
	objcache_destroy(devfs_node_cache);
	objcache_destroy(devfs_dev_cache);

	devfs_alias_reap();
}

/*
 * This is a sysctl handler to assist userland devname(3) in
 * finding the device name for a given udev.
 */
static int
devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS)
{
	udev_t udev;
	cdev_t found;
	int error;

	if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t))))
		return (error);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs sysctl, received udev: %d\n", udev);

	if (udev == NOUDEV)
		return (EINVAL);

	if ((found = devfs_find_device_by_udev(udev)) == NULL)
		return (ENOENT);

	return (SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname,
	    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
	    NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)");

SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");
TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable);
SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable,
	   0, "Enable DevFS debugging");

SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST,
	devfs_init, NULL);
SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY,
	  devfs_uninit, NULL);

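/*
 * Illustrative examples (not compiled in): the wildcard matchers below
 * are used to match device names against patterns, e.g. by the devfs
 * rule code.  A '*' matches any run of characters and '?' matches
 * exactly one; 0 is returned on a match, -1 otherwise.  For example:
 */
#if 0
	devfs_WildCmp("tty*", "ttyv0");		/* 0, matches */
	devfs_WildCmp("tty?0", "ttyv0");	/* 0, matches */
	devfs_WildCmp("tty*", "pts/0");		/* -1, no match */
	devfs_WildCaseCmp("TTY*", "ttyv0");	/* 0, case-insensitive */
#endif
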
/*
 * WildCmp() - compare wild string to sane string
 *
 * Returns 0 on success, -1 on failure.
 */
static int
wildCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s)
				return(-1);
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

/*
 * WildCaseCmp() - compare wild string to sane string, case insensitive
 *
 * Returns 0 on success, -1 on failure.
 */
static int
wildCaseCmp(const char **mary, int d, const char *w, const char *s)
{
	int i;

	/*
	 * skip fixed portion
	 */
	for (;;) {
		switch(*w) {
		case '*':
			/*
			 * optimize terminator
			 */
			if (w[1] == 0)
				return(0);
			if (w[1] != '?' && w[1] != '*') {
				/*
				 * optimize * followed by non-wild
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (s[i] == w[1] &&
					    wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			} else {
				/*
				 * less-optimal
				 */
				for (i = 0; s + i < mary[d]; ++i) {
					if (wildCaseCmp(mary, d + 1, w + 1, s + i) == 0)
						return(0);
				}
			}
			mary[d] = s;
			return(-1);
		case '?':
			if (*s == 0)
				return(-1);
			++w;
			++s;
			break;
		default:
			if (*w != *s) {
#define tolower(x) ((x >= 'A' && x <= 'Z')?(x+('a'-'A')):(x))
				if (tolower(*w) != tolower(*s))
					return(-1);
			}
			if (*w == 0)	/* terminator */
				return(0);
			++w;
			++s;
			break;
		}
	}
	/* not reached */
	return(-1);
}

struct cdev_privdata {
	void		*cdpd_data;
	cdevpriv_dtr_t	cdpd_dtr;
};

int
devfs_get_cdevpriv(struct file *fp, void **datap)
{
	int error;

	if (fp == NULL)
		return(EBADF);

	spin_lock_shared(&fp->f_spin);
	if (fp->f_data1 == NULL) {
		*datap = NULL;
		error = ENOENT;
	} else {
		struct cdev_privdata *p = fp->f_data1;

		*datap = p->cdpd_data;
		error = 0;
	}
	spin_unlock_shared(&fp->f_spin);

	return (error);
}

int
devfs_set_cdevpriv(struct file *fp, void *priv, cdevpriv_dtr_t dtr)
{
	struct cdev_privdata *p;
	int error;

	if (fp == NULL)
		return (ENOENT);

	p = kmalloc(sizeof(struct cdev_privdata), M_DEVFS, M_WAITOK);
	p->cdpd_data = priv;
	p->cdpd_dtr = dtr;

	spin_lock(&fp->f_spin);
	if (fp->f_data1 == NULL) {
		fp->f_data1 = p;
		error = 0;
	} else {
		error = EBUSY;
	}
	spin_unlock(&fp->f_spin);

	if (error)
		kfree(p, M_DEVFS);

	return error;
}

void
devfs_clear_cdevpriv(struct file *fp)
{
	struct cdev_privdata *p;

	if (fp == NULL)
		return;

	spin_lock(&fp->f_spin);
	p = fp->f_data1;
	fp->f_data1 = NULL;
	spin_unlock(&fp->f_spin);

	if (p != NULL) {
		p->cdpd_dtr(p->cdpd_data);
		kfree(p, M_DEVFS);
	}
}

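/*
 * Illustrative sketch (not compiled in): the cdevpriv API above gives a
 * driver per-open-file private data.  A typical pattern is to attach
 * state in d_open and let the destructor reclaim it when the file is
 * closed; the structure and names here are hypothetical.
 */
#if 0
static void
mydev_priv_dtor(void *data)
{
	kfree(data, M_TEMP);
}

static int
mydev_open(struct dev_open_args *ap)
{
	struct mydev_priv *priv;

	priv = kmalloc(sizeof(*priv), M_TEMP, M_WAITOK | M_ZERO);
	/* Tie priv to this open file; freed via mydev_priv_dtor on close. */
	return devfs_set_cdevpriv(ap->a_fp, priv, mydev_priv_dtor);
}
#endif
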
int
devfs_WildCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}

int
devfs_WildCaseCmp(const char *w, const char *s)
{
	int i;
	int c;
	int slen = strlen(s);
	const char **mary;

	for (i = c = 0; w[i]; ++i) {
		if (w[i] == '*')
			++c;
	}
	mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK);
	for (i = 0; i < c; ++i)
		mary[i] = s + slen;
	i = wildCaseCmp(mary, 0, w, s);
	kfree(mary, M_DEVFS);
	return(i);
}