1 /* 2 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Alex Hornung <ahornung@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/file.h>
#include <sys/msgport.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/devfs.h>
#include <sys/devfs_rules.h>
#include <sys/udev.h>

#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
#include <sys/sysref2.h>

MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations");
DEVFS_DECLARE_CLONE_BITMAP(ops_id);

/*
 * SYSREF Integration - reference counting, allocation,
 * sysid and syslink integration.
 */
static void devfs_cdev_terminate(cdev_t dev);
static void devfs_cdev_lock(cdev_t dev);
static void devfs_cdev_unlock(cdev_t dev);
static struct sysref_class cdev_sysref_class = {
	.name = "cdev",
	.mtype = M_DEVFS,
	.proto = SYSREF_PROTO_DEV,
	.offset = offsetof(struct cdev, si_sysref),
	.objsize = sizeof(struct cdev),
	.nom_cache = 32,
	.flags = 0,
	.ops = {
		.terminate = (sysref_terminate_func_t)devfs_cdev_terminate,
		.lock = (sysref_lock_func_t)devfs_cdev_lock,
		.unlock = (sysref_unlock_func_t)devfs_cdev_unlock
	}
};

/*
 * Object caches backing the three devfs allocation types: topology
 * nodes, core messages, and cdev structures.
 */
static struct objcache *devfs_node_cache;
static struct objcache *devfs_msg_cache;
static struct objcache *devfs_dev_cache;

static struct objcache_malloc_args devfs_node_malloc_args = {
	sizeof(struct devfs_node), M_DEVFS };
struct objcache_malloc_args devfs_msg_malloc_args = {
	sizeof(struct devfs_msg), M_DEVFS };
struct objcache_malloc_args devfs_dev_malloc_args = {
	sizeof(struct cdev), M_DEVFS };

/*
 * Global lists protected by devfs_lock: all registered devices, all
 * devfs mounts, clone handlers, aliases, and dev_ops registrations.
 */
static struct devfs_dev_head devfs_dev_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_list);
static struct devfs_mnt_head devfs_mnt_list =
		TAILQ_HEAD_INITIALIZER(devfs_mnt_list);
static struct devfs_chandler_head devfs_chandler_list =
		TAILQ_HEAD_INITIALIZER(devfs_chandler_list);
static struct devfs_alias_head devfs_alias_list =
		TAILQ_HEAD_INITIALIZER(devfs_alias_list);
static struct devfs_dev_ops_head devfs_dev_ops_list =
		TAILQ_HEAD_INITIALIZER(devfs_dev_ops_list);

struct lock devfs_lock;				/* master devfs lock */
static struct lwkt_port devfs_dispose_port;	/* auto-free reply port */
static struct lwkt_port devfs_msg_port;		/* core thread's msg port */
static struct thread *td_core;			/* the devfs core thread */

static struct spinlock ino_lock;		/* protects d_ino counter */
static ino_t d_ino;				/* next inode number */
static int devfs_debug_enable;			/* debug verbosity level */
static int devfs_run;				/* core thread run flag */

static ino_t devfs_fetch_ino(void);
static int devfs_create_all_dev_worker(struct devfs_node *);
static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int);
static int devfs_destroy_dev_worker(cdev_t);
static int devfs_destroy_related_worker(cdev_t);
static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int);
static int devfs_propagate_dev(cdev_t, int);
static int devfs_unlink_dev(cdev_t dev);
static void devfs_msg_exec(devfs_msg_t msg);

static int devfs_chandler_add_worker(const char *, d_clone_t *);
static int devfs_chandler_del_worker(const char *);

static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void devfs_msg_core(void *);

static int devfs_find_device_by_name_worker(devfs_msg_t);
static int devfs_find_device_by_udev_worker(devfs_msg_t);

static int devfs_apply_reset_rules_caller(char *, int);

static int devfs_scan_callback_worker(devfs_scan_t *, void *);

static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *,
		char *, size_t, int);

static int devfs_make_alias_worker(struct devfs_alias *);
static int devfs_destroy_alias_worker(struct devfs_alias *);
static int devfs_alias_remove(cdev_t);
static int devfs_alias_reap(void);
static int devfs_alias_propagate(struct devfs_alias *, int);
static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *);
static int devfs_alias_check_create(struct devfs_node *);

static int devfs_clr_related_flag_worker(cdev_t, uint32_t);
static int devfs_destroy_related_without_flag_worker(cdev_t, uint32_t);

static void *devfs_reaperp_callback(struct devfs_node *, void *);
static void *devfs_gc_dirs_callback(struct devfs_node *, void *);
static void *devfs_gc_links_callback(struct devfs_node *, struct devfs_node *);
static void *
devfs_inode_to_vnode_worker_callback(struct devfs_node *, ino_t *);

/*
 * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function
 * using kvprintf.  Output is produced only when the requested level is
 * at or below the configured devfs_debug_enable level.
 */
int
devfs_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= devfs_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

/*
 * devfs_allocp() Allocates a new devfs node with the specified
 * parameters. The node is also automatically linked into the topology
 * if a parent is specified. It also calls the rule and alias stuff to
 * be applied on the new node.
 *
 * The name string is copied; the caller retains ownership of it.
 */
struct devfs_node *
devfs_allocp(devfs_nodetype devfsnodetype, char *name,
	     struct devfs_node *parent, struct mount *mp, cdev_t dev)
{
	struct devfs_node *node = NULL;
	size_t namlen = strlen(name);

	node = objcache_get(devfs_node_cache, M_WAITOK);
	bzero(node, sizeof(*node));

	/* leak_count tracks outstanding nodes per mount for diagnostics */
	atomic_add_long(&DEVFS_MNTDATA(mp)->leak_count, 1);

	node->d_dev = NULL;
	node->nchildren = 1;
	node->mp = mp;
	node->d_dir.d_ino = devfs_fetch_ino();

	/*
	 * Cookie jar for children. Leave 0 and 1 for '.' and '..' entries
	 * respectively.
	 */
	node->cookie_jar = 2;

	/*
	 * Access Control members
	 */
	node->mode = DEVFS_DEFAULT_MODE;
	node->uid = DEVFS_DEFAULT_UID;
	node->gid = DEVFS_DEFAULT_GID;

	switch (devfsnodetype) {
	case Nroot:
		/*
		 * Ensure that we don't recycle the root vnode by marking it as
		 * linked into the topology.
		 */
		node->flags |= DEVFS_NODE_LINKED;
		/* fall through -- the root is also a directory */
	case Ndir:
		TAILQ_INIT(DEVFS_DENODE_HEAD(node));
		node->d_dir.d_type = DT_DIR;
		node->nchildren = 2;	/* '.' and '..' */
		break;

	case Nlink:
		node->d_dir.d_type = DT_LNK;
		break;

	case Nreg:
		node->d_dir.d_type = DT_REG;
		break;

	case Ndev:
		if (dev != NULL) {
			node->d_dir.d_type = DT_CHR;
			node->d_dev = dev;

			/* device nodes inherit the cdev's permissions */
			node->mode = dev->si_perms;
			node->uid = dev->si_uid;
			node->gid = dev->si_gid;

			devfs_alias_check_create(node);
		}
		break;

	default:
		panic("devfs_allocp: unknown node type");
	}

	node->v_node = NULL;
	node->node_type = devfsnodetype;

	/* Initialize the dirent structure of each devfs vnode */
	node->d_dir.d_namlen = namlen;
	node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK);
	memcpy(node->d_dir.d_name, name, namlen);
	node->d_dir.d_name[namlen] = '\0';

	/* Initialize the parent node element */
	node->parent = parent;

	/* Initialize *time members */
	nanotime(&node->atime);
	node->mtime = node->ctime = node->atime;

	/*
	 * Associate with parent as last step, clean out namecache
	 * reference.
	 */
	if ((parent != NULL) &&
	    ((parent->node_type == Nroot) || (parent->node_type == Ndir))) {
		parent->nchildren++;
		node->cookie = parent->cookie_jar++;
		node->flags |= DEVFS_NODE_LINKED;
		TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link);

		/* This forces negative namecache lookups to clear */
		++mp->mnt_namecache_gen;
	}

	/* Apply rules */
	devfs_rule_check_apply(node, NULL);

	atomic_add_long(&DEVFS_MNTDATA(mp)->file_count, 1);

	return node;
}

/*
 * devfs_allocv() allocates a new vnode based on a devfs node.
 *
 * Returns 0 with *vpp set on success, or an error with *vpp NULL.
 * Called with the devfs master lock held exclusively; the lock is
 * temporarily dropped around vget()/getnewvnode() (see below).
 */
int
devfs_allocv(struct vnode **vpp, struct devfs_node *node)
{
	struct vnode *vp;
	int error = 0;

	KKASSERT(node);

	/*
	 * devfs master lock must not be held across a vget() call, we have
	 * to hold our ad-hoc vp to avoid a free race from destroying the
	 * contents of the structure.  The vget() will interlock recycles
	 * for us.
	 */
try_again:
	while ((vp = node->v_node) != NULL) {
		vhold(vp);
		lockmgr(&devfs_lock, LK_RELEASE);
		error = vget(vp, LK_EXCLUSIVE);
		vdrop(vp);
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		if (error == 0) {
			*vpp = vp;
			goto out;
		}
		if (error != ENOENT) {
			*vpp = NULL;
			goto out;
		}
		/* ENOENT: vnode was recycled out from under us, retry */
	}

	/*
	 * devfs master lock must not be held across a getnewvnode() call.
	 */
	lockmgr(&devfs_lock, LK_RELEASE);
	if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		goto out;
	}
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vp = *vpp;

	/*
	 * Another thread may have raced us and associated a vnode while
	 * the lock was dropped; discard ours and start over.
	 */
	if (node->v_node != NULL) {
		vp->v_type = VBAD;
		vx_put(vp);
		goto try_again;
	}

	vp->v_data = node;
	node->v_node = vp;

	switch (node->node_type) {
	case Nroot:
		vsetflags(vp, VROOT);
		/* fall through */
	case Ndir:
		vp->v_type = VDIR;
		break;

	case Nlink:
		vp->v_type = VLNK;
		break;

	case Nreg:
		vp->v_type = VREG;
		break;

	case Ndev:
		vp->v_type = VCHR;
		KKASSERT(node->d_dev);

		vp->v_uminor = node->d_dev->si_uminor;
		vp->v_umajor = node->d_dev->si_umajor;

		v_associate_rdev(vp, node->d_dev);
		vp->v_ops = &node->mp->mnt_vn_spec_ops;
		break;

	default:
		panic("devfs_allocv: unknown node type");
	}

out:
	return error;
}

/*
 * devfs_allocvp allocates both a devfs node (with the given settings)
 * and a vnode based on the newly created devfs node.
 *
 * Always returns 0; *vpp is NULL if node allocation failed.
 */
int
devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype,
	      char *name, struct devfs_node *parent, cdev_t dev)
{
	struct devfs_node *node;

	node = devfs_allocp(devfsnodetype, name, parent, mp, dev);

	if (node != NULL)
		devfs_allocv(vpp, node);
	else
		*vpp = NULL;

	return 0;
}

/*
 * Destroy the devfs_node.  The node must be unlinked from the topology.
 *
 * This function will also destroy any vnode association with the node
 * and device.
 *
 * The cdev_t itself remains intact.
 *
 * The core lock is not necessarily held on call and must be temporarily
 * released if it is to avoid a deadlock.
 */
int
devfs_freep(struct devfs_node *node)
{
	struct vnode *vp;
	int relock;

	KKASSERT(node);
	KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) ||
		 (node->node_type == Nroot));

	/*
	 * Protect against double frees
	 */
	KKASSERT((node->flags & DEVFS_DESTROYED) == 0);
	node->flags |= DEVFS_DESTROYED;

	/*
	 * Avoid deadlocks between devfs_lock and the vnode lock when
	 * disassociating the vnode (stress2 pty vs ls -la /dev/pts).
	 *
	 * This also prevents the vnode reclaim code from double-freeing
	 * the node.  The vget() is required to safely modify the vp
	 * and cycle the refs to terminate an inactive vp.
	 */
	if (lockstatus(&devfs_lock, curthread) == LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_RELEASE);
		relock = 1;
	} else {
		relock = 0;
	}

	/* Disassociate any vnode from the node and from its device. */
	while ((vp = node->v_node) != NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_RETRY) != 0)
			break;
		v_release_rdev(vp);
		vp->v_data = NULL;
		node->v_node = NULL;
		cache_inval_vp(vp, CINV_DESTROY);
		vput(vp);
	}

	/*
	 * Remaining cleanup
	 */
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->leak_count, 1);
	if (node->symlink_name) {
		kfree(node->symlink_name, M_DEVFS);
		node->symlink_name = NULL;
	}

	/*
	 * Remove the node from the orphan list if it is still on it.
	 */
	if (node->flags & DEVFS_ORPHANED)
		devfs_tracer_del_orphan(node);

	if (node->d_dir.d_name) {
		kfree(node->d_dir.d_name, M_DEVFS);
		node->d_dir.d_name = NULL;
	}
	atomic_subtract_long(&DEVFS_MNTDATA(node->mp)->file_count, 1);
	objcache_put(devfs_node_cache, node);

	/* Reacquire the master lock if we released it above. */
	if (relock)
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

	return 0;
}

/*
 * Unlink the devfs node from the topology and add it to the orphan list.
 * The node will later be destroyed by freep.
 *
 * Any vnode association, including the v_rdev and v_data, remains intact
 * until the freep.
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;
	KKASSERT(node);

	/*
	 * Add the node to the orphan list, so it is referenced somewhere,
	 * so we don't leak it.
	 */
	devfs_tracer_add_orphan(node);

	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		node->flags &= ~DEVFS_NODE_LINKED;
	}

	node->parent = NULL;
	return 0;
}

/*
 * Depth-first iteration over the topology rooted at 'node'.  The
 * callback is invoked on children before the node itself; iteration
 * stops early and returns the first non-NULL value a callback returns.
 */
void *
devfs_iterate_topology(struct devfs_node *node,
		devfs_iterate_callback_t *callback, void *arg1)
{
	struct devfs_node *node1, *node2;
	void *ret = NULL;

	/* Only directories have children; nchildren > 2 excludes '.'/'..' */
	if ((node->node_type == Nroot) || (node->node_type == Ndir)) {
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					link, node2) {
				if ((ret = devfs_iterate_topology(node1, callback, arg1)))
					return ret;
			}
		}
	}

	ret = callback(node, arg1);
	return ret;
}

/*
 * devfs_reaperp() is a recursive function that iterates through all the
 * topology, unlinking and freeing all devfs nodes.
 */
static void *
devfs_reaperp_callback(struct devfs_node *node, void *unused)
{
	devfs_unlinkp(node);
	devfs_freep(node);

	return NULL;
}

/*
 * Iteration callback: free empty directories (only '.' and '..' left)
 * that were not explicitly created by the user.
 */
static void *
devfs_gc_dirs_callback(struct devfs_node *node, void *unused)
{
	if (node->node_type == Ndir) {
		if ((node->nchildren == 2) &&
		    !(node->flags & DEVFS_USER_CREATED)) {
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return NULL;
}

/*
 * Iteration callback: free auto-created links whose target is the
 * node being garbage collected.
 */
static void *
devfs_gc_links_callback(struct devfs_node *node, struct devfs_node *target)
{
	if ((node->node_type == Nlink) && (node->link_target == target)) {
		devfs_unlinkp(node);
		devfs_freep(node);
	}

	return NULL;
}

/*
 * devfs_gc() is devfs garbage collector. It takes care of unlinking and
 * freeing a node, but also removes empty directories and links that link
 * via devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	/* First reap any links pointing at this node. */
	if (node->nlinks > 0)
		devfs_iterate_topology(root_node,
				(devfs_iterate_callback_t *)devfs_gc_links_callback,
				node);

	devfs_unlinkp(node);

	/* Then collect any directories the unlink left empty. */
	devfs_iterate_topology(root_node,
			(devfs_iterate_callback_t *)devfs_gc_dirs_callback,
			NULL);

	devfs_freep(node);

	return 0;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device.  The reference is owned
 * by devfs and represents all of the device's node associations.
 */
int
devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	reference_dev(dev);
	devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms);

	return 0;
}

/*
 * devfs_destroy_dev() is the asynchronous entry point for device
 * destruction.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_destroy_dev(cdev_t dev)
{
	devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0);
	return 0;
}

/*
 * devfs_mount_add() is the synchronous entry point for adding a new devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_add(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_mount_del() is the synchronous entry point for removing a devfs
 * mount.  It sends a synchronous message with the relevant details to the
 * devfs core.
 */
int
devfs_mount_del(struct devfs_mnt_data *mnt)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_mnt = mnt;
	msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_destroy_related() is the synchronous entry point for device
 * destruction by subname. It just sends a message with the relevant
 * details to the devfs core.
 */
int
devfs_destroy_related(cdev_t dev)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_load = dev;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_RELATED, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * Synchronously clear 'flag' on all devices whose parent chain leads
 * to 'dev'.
 */
int
devfs_clr_related_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_CLR_RELATED_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * Synchronously destroy all devices related to 'dev' that do NOT have
 * 'flag' set.
 */
int
devfs_destroy_related_without_flag(cdev_t dev, uint32_t flag)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_flags.dev = dev;
	msg->mdv_flags.flag = flag;
	msg = devfs_msg_send_sync(DEVFS_DESTROY_RELATED_WO_FLAG, msg);
	devfs_msg_put(msg);

	return 0;
}

/*
 * devfs_create_all_dev is the asynchronous entry point to trigger device
 * node creation.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_create_all_dev(struct devfs_node *root)
{
	devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root);
	return 0;
}

/*
 * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all
 * devices with a specific set of dev_ops and minor.  It just sends a
 * message with the relevant details to the devfs core.
 */
int
devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor)
{
	devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor);
	return 0;
}

/*
 * devfs_clone_handler_add is the synchronous entry point to add a new
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_add(const char *name, d_clone_t *nhandler)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = nhandler;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_clone_handler_del is the synchronous entry point to remove a
 * clone handler.  It just sends a message with the relevant details to
 * the devfs core.
 */
int
devfs_clone_handler_del(const char *name)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	msg->mdv_chandler.name = name;
	msg->mdv_chandler.nhandler = NULL;
	msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg);
	devfs_msg_put(msg);
	return 0;
}

/*
 * devfs_find_device_by_name is the synchronous entry point to find a
 * device given its name.  It sends a synchronous message with the
 * relevant details to the devfs core and returns the answer.
 *
 * Returns NULL if fmt is NULL or no matching device exists.
 */
cdev_t
devfs_find_device_by_name(const char *fmt, ...)
{
	cdev_t found = NULL;
	devfs_msg_t msg;
	char *target;
	__va_list ap;

	if (fmt == NULL)
		return NULL;

	__va_start(ap, fmt);
	kvasnrprintf(&target, PATH_MAX, 10, fmt, ap);
	__va_end(ap);

	msg = devfs_msg_get();
	msg->mdv_name = target;
	msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg);
	found = msg->mdv_cdev;
	devfs_msg_put(msg);
	kvasfree(&target);

	return found;
}

/*
 * devfs_find_device_by_udev is the synchronous entry point to find a
 * device given its udev number.  It sends a synchronous message with
 * the relevant details to the devfs core and returns the answer.
791 */ 792 cdev_t 793 devfs_find_device_by_udev(udev_t udev) 794 { 795 cdev_t found = NULL; 796 devfs_msg_t msg; 797 798 msg = devfs_msg_get(); 799 msg->mdv_udev = udev; 800 msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg); 801 found = msg->mdv_cdev; 802 devfs_msg_put(msg); 803 804 devfs_debug(DEVFS_DEBUG_DEBUG, 805 "devfs_find_device_by_udev found? %s -end:3-\n", 806 ((found) ? found->si_name:"NO")); 807 return found; 808 } 809 810 struct vnode * 811 devfs_inode_to_vnode(struct mount *mp, ino_t target) 812 { 813 struct vnode *vp = NULL; 814 devfs_msg_t msg; 815 816 if (mp == NULL) 817 return NULL; 818 819 msg = devfs_msg_get(); 820 msg->mdv_ino.mp = mp; 821 msg->mdv_ino.ino = target; 822 msg = devfs_msg_send_sync(DEVFS_INODE_TO_VNODE, msg); 823 vp = msg->mdv_ino.vp; 824 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 825 devfs_msg_put(msg); 826 827 return vp; 828 } 829 830 /* 831 * devfs_make_alias is the asynchronous entry point to register an alias 832 * for a device. It just sends a message with the relevant details to the 833 * devfs core. 834 */ 835 int 836 devfs_make_alias(const char *name, cdev_t dev_target) 837 { 838 struct devfs_alias *alias; 839 size_t len; 840 841 len = strlen(name); 842 843 alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK); 844 alias->name = kstrdup(name, M_DEVFS); 845 alias->namlen = len; 846 alias->dev_target = dev_target; 847 848 devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias); 849 return 0; 850 } 851 852 /* 853 * devfs_destroy_alias is the asynchronous entry point to deregister an alias 854 * for a device. It just sends a message with the relevant details to the 855 * devfs core. 
856 */ 857 int 858 devfs_destroy_alias(const char *name, cdev_t dev_target) 859 { 860 struct devfs_alias *alias; 861 size_t len; 862 863 len = strlen(name); 864 865 alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK); 866 alias->name = kstrdup(name, M_DEVFS); 867 alias->namlen = len; 868 alias->dev_target = dev_target; 869 870 devfs_msg_send_generic(DEVFS_DESTROY_ALIAS, alias); 871 return 0; 872 } 873 874 /* 875 * devfs_apply_rules is the asynchronous entry point to trigger application 876 * of all rules. It just sends a message with the relevant details to the 877 * devfs core. 878 */ 879 int 880 devfs_apply_rules(char *mntto) 881 { 882 char *new_name; 883 884 new_name = kstrdup(mntto, M_DEVFS); 885 devfs_msg_send_name(DEVFS_APPLY_RULES, new_name); 886 887 return 0; 888 } 889 890 /* 891 * devfs_reset_rules is the asynchronous entry point to trigger reset of all 892 * rules. It just sends a message with the relevant details to the devfs core. 893 */ 894 int 895 devfs_reset_rules(char *mntto) 896 { 897 char *new_name; 898 899 new_name = kstrdup(mntto, M_DEVFS); 900 devfs_msg_send_name(DEVFS_RESET_RULES, new_name); 901 902 return 0; 903 } 904 905 906 /* 907 * devfs_scan_callback is the asynchronous entry point to call a callback 908 * on all cdevs. 909 * It just sends a message with the relevant details to the devfs core. 910 */ 911 int 912 devfs_scan_callback(devfs_scan_t *callback, void *arg) 913 { 914 devfs_msg_t msg; 915 916 KKASSERT(callback); 917 918 msg = devfs_msg_get(); 919 msg->mdv_load = callback; 920 msg->mdv_load2 = arg; 921 msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg); 922 devfs_msg_put(msg); 923 924 return 0; 925 } 926 927 928 /* 929 * Acts as a message drain. Any message that is replied to here gets destroyed 930 * and the memory freed. 
931 */ 932 static void 933 devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg) 934 { 935 devfs_msg_put((devfs_msg_t)msg); 936 } 937 938 /* 939 * devfs_msg_get allocates a new devfs msg and returns it. 940 */ 941 devfs_msg_t 942 devfs_msg_get(void) 943 { 944 return objcache_get(devfs_msg_cache, M_WAITOK); 945 } 946 947 /* 948 * devfs_msg_put deallocates a given devfs msg. 949 */ 950 int 951 devfs_msg_put(devfs_msg_t msg) 952 { 953 objcache_put(devfs_msg_cache, msg); 954 return 0; 955 } 956 957 /* 958 * devfs_msg_send is the generic asynchronous message sending facility 959 * for devfs. By default the reply port is the automatic disposal port. 960 * 961 * If the current thread is the devfs_msg_port thread we execute the 962 * operation synchronously. 963 */ 964 void 965 devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg) 966 { 967 lwkt_port_t port = &devfs_msg_port; 968 969 lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0); 970 971 devfs_msg->hdr.u.ms_result = cmd; 972 973 if (port->mpu_td == curthread) { 974 devfs_msg_exec(devfs_msg); 975 lwkt_replymsg(&devfs_msg->hdr, 0); 976 } else { 977 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg); 978 } 979 } 980 981 /* 982 * devfs_msg_send_sync is the generic synchronous message sending 983 * facility for devfs. It initializes a local reply port and waits 984 * for the core's answer. This answer is then returned. 985 */ 986 devfs_msg_t 987 devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg) 988 { 989 struct lwkt_port rep_port; 990 devfs_msg_t msg_incoming; 991 lwkt_port_t port = &devfs_msg_port; 992 993 lwkt_initport_thread(&rep_port, curthread); 994 lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0); 995 996 devfs_msg->hdr.u.ms_result = cmd; 997 998 lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg); 999 msg_incoming = lwkt_waitport(&rep_port, 0); 1000 1001 return msg_incoming; 1002 } 1003 1004 /* 1005 * sends a message with a generic argument. 
1006 */ 1007 void 1008 devfs_msg_send_generic(uint32_t cmd, void *load) 1009 { 1010 devfs_msg_t devfs_msg = devfs_msg_get(); 1011 1012 devfs_msg->mdv_load = load; 1013 devfs_msg_send(cmd, devfs_msg); 1014 } 1015 1016 /* 1017 * sends a message with a name argument. 1018 */ 1019 void 1020 devfs_msg_send_name(uint32_t cmd, char *name) 1021 { 1022 devfs_msg_t devfs_msg = devfs_msg_get(); 1023 1024 devfs_msg->mdv_name = name; 1025 devfs_msg_send(cmd, devfs_msg); 1026 } 1027 1028 /* 1029 * sends a message with a mount argument. 1030 */ 1031 void 1032 devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt) 1033 { 1034 devfs_msg_t devfs_msg = devfs_msg_get(); 1035 1036 devfs_msg->mdv_mnt = mnt; 1037 devfs_msg_send(cmd, devfs_msg); 1038 } 1039 1040 /* 1041 * sends a message with an ops argument. 1042 */ 1043 void 1044 devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor) 1045 { 1046 devfs_msg_t devfs_msg = devfs_msg_get(); 1047 1048 devfs_msg->mdv_ops.ops = ops; 1049 devfs_msg->mdv_ops.minor = minor; 1050 devfs_msg_send(cmd, devfs_msg); 1051 } 1052 1053 /* 1054 * sends a message with a clone handler argument. 1055 */ 1056 void 1057 devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler) 1058 { 1059 devfs_msg_t devfs_msg = devfs_msg_get(); 1060 1061 devfs_msg->mdv_chandler.name = name; 1062 devfs_msg->mdv_chandler.nhandler = handler; 1063 devfs_msg_send(cmd, devfs_msg); 1064 } 1065 1066 /* 1067 * sends a message with a device argument. 1068 */ 1069 void 1070 devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms) 1071 { 1072 devfs_msg_t devfs_msg = devfs_msg_get(); 1073 1074 devfs_msg->mdv_dev.dev = dev; 1075 devfs_msg->mdv_dev.uid = uid; 1076 devfs_msg->mdv_dev.gid = gid; 1077 devfs_msg->mdv_dev.perms = perms; 1078 1079 devfs_msg_send(cmd, devfs_msg); 1080 } 1081 1082 /* 1083 * sends a message with a link argument. 
 */
/*
 * Send an asynchronous link message (cmd) to the devfs core thread,
 * carrying the link name, its target path and the mount point.  The
 * message is taken from the message objcache and recycled by the core
 * once processed.
 */
void
devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp)
{
	devfs_msg_t devfs_msg = devfs_msg_get();

	devfs_msg->mdv_link.name = name;
	devfs_msg->mdv_link.target = target;
	devfs_msg->mdv_link.mp = mp;
	devfs_msg_send(cmd, devfs_msg);
}

/*
 * devfs_msg_core is the main devfs thread. It handles all incoming messages
 * and calls the relevant worker functions. By using messages it's assured
 * that events occur in the correct order.
 */
static void
devfs_msg_core(void *arg)
{
	devfs_msg_t msg;

	lwkt_initport_thread(&devfs_msg_port, curthread);

	/*
	 * Signal whoever spawned us (sleeping on td_core) that the
	 * message loop is up and running.
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_run = 1;
	wakeup(td_core);
	lockmgr(&devfs_lock, LK_RELEASE);

	get_mplock();	/* mpsafe yet? */

	/*
	 * Drain messages until a DEVFS_TERMINATE_CORE message clears
	 * devfs_run (see devfs_msg_exec()).
	 */
	while (devfs_run) {
		msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0);
		devfs_debug(DEVFS_DEBUG_DEBUG,
				"devfs_msg_core, new msg: %x\n",
				(unsigned int)msg->hdr.u.ms_result);
		devfs_msg_exec(msg);
		lwkt_replymsg(&msg->hdr, 0);
	}

	rel_mplock();
	/* Wake the thread waiting for the core to terminate */
	wakeup(td_core);

	lwkt_exit();
}

/*
 * Dispatch a single devfs message to its worker function.  Every
 * worker runs with the global devfs_lock held exclusively, which
 * serializes all topology and device-list mutations.
 */
static void
devfs_msg_exec(devfs_msg_t msg)
{
	struct devfs_mnt_data *mnt;
	struct devfs_node *node;
	cdev_t	dev;

	/*
	 * Acquire the devfs lock to ensure safety of all called functions
	 */
	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	switch (msg->hdr.u.ms_result) {
	case DEVFS_DEVICE_CREATE:
		dev = msg->mdv_dev.dev;
		devfs_create_dev_worker(dev,
					msg->mdv_dev.uid,
					msg->mdv_dev.gid,
					msg->mdv_dev.perms);
		break;
	case DEVFS_DEVICE_DESTROY:
		dev = msg->mdv_dev.dev;
		devfs_destroy_dev_worker(dev);
		break;
	case DEVFS_DESTROY_RELATED:
		devfs_destroy_related_worker(msg->mdv_load);
		break;
	case DEVFS_DESTROY_DEV_BY_OPS:
		devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops,
						msg->mdv_ops.minor);
		break;
	case DEVFS_CREATE_ALL_DEV:
		node = (struct devfs_node *)msg->mdv_load;
		devfs_create_all_dev_worker(node);
		break;
	case DEVFS_MOUNT_ADD:
		mnt = msg->mdv_mnt;
		TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link);
		/* Populate the fresh mount with all registered devices */
		devfs_create_all_dev_worker(mnt->root_node);
		break;
	case DEVFS_MOUNT_DEL:
		mnt = msg->mdv_mnt;
		TAILQ_REMOVE(&devfs_mnt_list, mnt, link);
		/* Reap the whole node topology under this mount point */
		devfs_iterate_topology(mnt->root_node, devfs_reaperp_callback,
				       NULL);
		if (mnt->leak_count) {
			devfs_debug(DEVFS_DEBUG_SHOW,
				    "Leaked %ld devfs_node elements!\n",
				    mnt->leak_count);
		}
		break;
	case DEVFS_CHANDLER_ADD:
		devfs_chandler_add_worker(msg->mdv_chandler.name,
				msg->mdv_chandler.nhandler);
		break;
	case DEVFS_CHANDLER_DEL:
		devfs_chandler_del_worker(msg->mdv_chandler.name);
		break;
	case DEVFS_FIND_DEVICE_BY_NAME:
		devfs_find_device_by_name_worker(msg);
		break;
	case DEVFS_FIND_DEVICE_BY_UDEV:
		devfs_find_device_by_udev_worker(msg);
		break;
	case DEVFS_MAKE_ALIAS:
		devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_DESTROY_ALIAS:
		devfs_destroy_alias_worker((struct devfs_alias *)msg->mdv_load);
		break;
	case DEVFS_APPLY_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 1);
		break;
	case DEVFS_RESET_RULES:
		devfs_apply_reset_rules_caller(msg->mdv_name, 0);
		break;
	case DEVFS_SCAN_CALLBACK:
		devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load,
			msg->mdv_load2);
		break;
	case DEVFS_CLR_RELATED_FLAG:
		devfs_clr_related_flag_worker(msg->mdv_flags.dev,
				msg->mdv_flags.flag);
		break;
	case DEVFS_DESTROY_RELATED_WO_FLAG:
		devfs_destroy_related_without_flag_worker(msg->mdv_flags.dev,
				msg->mdv_flags.flag);
		break;
	case DEVFS_INODE_TO_VNODE:
		msg->mdv_ino.vp = devfs_iterate_topology(
				DEVFS_MNTDATA(msg->mdv_ino.mp)->root_node,
				(devfs_iterate_callback_t *)devfs_inode_to_vnode_worker_callback,
				&msg->mdv_ino.ino);
		break;
	case DEVFS_TERMINATE_CORE:
		/* Ask the message loop in devfs_msg_core() to exit */
		devfs_run = 0;
		break;
	case DEVFS_SYNC:
		/* No-op: the reply alone is the synchronization point */
		break;
	default:
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "devfs_msg_core: unknown message "
			    "received at core\n");
		break;
	}
	lockmgr(&devfs_lock, LK_RELEASE);
}

/*
 * Emit a devctl notification of the form "cdev=<name>" for the given
 * event string (e.g. "CREATE" / "DESTROY").
 */
static void
devfs_devctl_notify(cdev_t dev, const char *ev)
{
	static const char prefix[] = "cdev=";
	char *data;
	int namelen;

	namelen = strlen(dev->si_name);
	/* sizeof(prefix) includes the NUL, so this covers prefix+name+NUL */
	data = kmalloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
	memcpy(data, prefix, sizeof(prefix) - 1);
	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
	devctl_notify("DEVFS", "CDEV", ev, data);
	kfree(data, M_TEMP);
}

/*
 * Worker function to insert a new dev into the dev list and initialize its
 * permissions. It also calls devfs_propagate_dev which in turn propagates
 * the change to all mount points.
 *
 * The passed dev is already referenced. This reference is eaten by this
 * function and represents the dev's linkage into devfs_dev_list.
 */
static int
devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms)
{
	KKASSERT(dev);

	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_perms = perms;

	devfs_link_dev(dev);
	devfs_propagate_dev(dev, 1);

	udev_event_attach(dev, NULL, 0);
	devfs_devctl_notify(dev, "CREATE");

	return 0;
}

/*
 * Worker function to delete a dev from the dev list and free the cdev.
 * It also calls devfs_propagate_dev which in turn propagates the change
 * to all mount points.
 */
static int
devfs_destroy_dev_worker(cdev_t dev)
{
	int error;

	KKASSERT(dev);
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	/* Unlink from devfs_dev_list and remove its nodes from all mounts */
	error = devfs_unlink_dev(dev);
	devfs_propagate_dev(dev, 0);

	devfs_devctl_notify(dev, "DESTROY");
	udev_event_detach(dev, NULL, 0);

	/*
	 * Drop the linkage reference (only when we actually unlinked the
	 * dev) plus two further references.
	 * NOTE(review): presumably these two match references taken on
	 * the destroy path before this message reached the core --
	 * confirm against the destroy_dev() callers.
	 */
	if (error == 0)
		release_dev(dev);	/* link ref */
	release_dev(dev);
	release_dev(dev);

	return 0;
}

/*
 * Worker function that recursively destroys all children of the given
 * device (every dev whose si_parent chain leads to needle).  Calls
 * devfs_destroy_dev_worker for the actual destruction.
 */
static int
devfs_destroy_related_worker(cdev_t needle)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related worker: %s\n",
		    needle->si_name);
	/*
	 * Restart the scan after each destruction because
	 * devfs_destroy_dev_worker removes entries from the very list
	 * we are iterating.
	 */
	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_worker(dev);
			devfs_destroy_dev_worker(dev);
			goto restart;
		}
	}
	return 0;
}

/*
 * Recursively clears the given flag bit on all children of needle.
 */
static int
devfs_clr_related_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev, dev1;

	TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) {
		if (dev->si_parent == needle) {
			devfs_clr_related_flag_worker(dev, flag);
			dev->si_flags &= ~flag;
		}
	}

	return 0;
}

/*
 * Recursively destroys every child of needle that does NOT have the
 * given flag bit set.
 */
static int
devfs_destroy_related_without_flag_worker(cdev_t needle, uint32_t flag)
{
	cdev_t dev;

restart:
	devfs_debug(DEVFS_DEBUG_DEBUG, "related_wo_flag: %s\n",
		    needle->si_name);

	/*
	 * Restart after each destruction; see
	 * devfs_destroy_related_worker().
	 */
	TAILQ_FOREACH(dev, &devfs_dev_list, link) {
		if (dev->si_parent == needle) {
			devfs_destroy_related_without_flag_worker(dev, flag);
			if (!(dev->si_flags & flag)) {
				devfs_destroy_dev_worker(dev);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "related_wo_flag: %s restart\n", dev->si_name);
				goto restart;
			}
		}
	}

	return 0;
}

/*
1368 * Worker function that creates all device nodes on top of a devfs 1369 * root node. 1370 */ 1371 static int 1372 devfs_create_all_dev_worker(struct devfs_node *root) 1373 { 1374 cdev_t dev; 1375 1376 KKASSERT(root); 1377 1378 TAILQ_FOREACH(dev, &devfs_dev_list, link) { 1379 devfs_create_device_node(root, dev, NULL, NULL); 1380 } 1381 1382 return 0; 1383 } 1384 1385 /* 1386 * Worker function that destroys all devices that match a specific 1387 * dev_ops and/or minor. If minor is less than 0, it is not matched 1388 * against. It also propagates all changes. 1389 */ 1390 static int 1391 devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor) 1392 { 1393 cdev_t dev, dev1; 1394 1395 KKASSERT(ops); 1396 1397 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { 1398 if (dev->si_ops != ops) 1399 continue; 1400 if ((minor < 0) || (dev->si_uminor == minor)) { 1401 devfs_destroy_dev_worker(dev); 1402 } 1403 } 1404 1405 return 0; 1406 } 1407 1408 /* 1409 * Worker function that registers a new clone handler in devfs. 1410 */ 1411 static int 1412 devfs_chandler_add_worker(const char *name, d_clone_t *nhandler) 1413 { 1414 struct devfs_clone_handler *chandler = NULL; 1415 u_char len = strlen(name); 1416 1417 if (len == 0) 1418 return 1; 1419 1420 TAILQ_FOREACH(chandler, &devfs_chandler_list, link) { 1421 if (chandler->namlen != len) 1422 continue; 1423 1424 if (!memcmp(chandler->name, name, len)) { 1425 /* Clonable basename already exists */ 1426 return 1; 1427 } 1428 } 1429 1430 chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO); 1431 chandler->name = kstrdup(name, M_DEVFS); 1432 chandler->namlen = len; 1433 chandler->nhandler = nhandler; 1434 1435 TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link); 1436 return 0; 1437 } 1438 1439 /* 1440 * Worker function that removes a given clone handler from the 1441 * clone handler list. 
1442 */ 1443 static int 1444 devfs_chandler_del_worker(const char *name) 1445 { 1446 struct devfs_clone_handler *chandler, *chandler2; 1447 u_char len = strlen(name); 1448 1449 if (len == 0) 1450 return 1; 1451 1452 TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) { 1453 if (chandler->namlen != len) 1454 continue; 1455 if (memcmp(chandler->name, name, len)) 1456 continue; 1457 1458 TAILQ_REMOVE(&devfs_chandler_list, chandler, link); 1459 kfree(chandler->name, M_DEVFS); 1460 kfree(chandler, M_DEVFS); 1461 break; 1462 } 1463 1464 return 0; 1465 } 1466 1467 /* 1468 * Worker function that finds a given device name and changes 1469 * the message received accordingly so that when replied to, 1470 * the answer is returned to the caller. 1471 */ 1472 static int 1473 devfs_find_device_by_name_worker(devfs_msg_t devfs_msg) 1474 { 1475 struct devfs_alias *alias; 1476 cdev_t dev; 1477 cdev_t found = NULL; 1478 1479 TAILQ_FOREACH(dev, &devfs_dev_list, link) { 1480 if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) { 1481 found = dev; 1482 break; 1483 } 1484 } 1485 if (found == NULL) { 1486 TAILQ_FOREACH(alias, &devfs_alias_list, link) { 1487 if (strcmp(devfs_msg->mdv_name, alias->name) == 0) { 1488 found = alias->dev_target; 1489 break; 1490 } 1491 } 1492 } 1493 devfs_msg->mdv_cdev = found; 1494 1495 return 0; 1496 } 1497 1498 /* 1499 * Worker function that finds a given device udev and changes 1500 * the message received accordingly so that when replied to, 1501 * the answer is returned to the caller. 
1502 */ 1503 static int 1504 devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg) 1505 { 1506 cdev_t dev, dev1; 1507 cdev_t found = NULL; 1508 1509 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { 1510 if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) { 1511 found = dev; 1512 break; 1513 } 1514 } 1515 devfs_msg->mdv_cdev = found; 1516 1517 return 0; 1518 } 1519 1520 /* 1521 * Worker function that inserts a given alias into the 1522 * alias list, and propagates the alias to all mount 1523 * points. 1524 */ 1525 static int 1526 devfs_make_alias_worker(struct devfs_alias *alias) 1527 { 1528 struct devfs_alias *alias2; 1529 size_t len = strlen(alias->name); 1530 int found = 0; 1531 1532 TAILQ_FOREACH(alias2, &devfs_alias_list, link) { 1533 if (len != alias2->namlen) 1534 continue; 1535 1536 if (!memcmp(alias->name, alias2->name, len)) { 1537 found = 1; 1538 break; 1539 } 1540 } 1541 1542 if (!found) { 1543 /* 1544 * The alias doesn't exist yet, so we add it to the alias list 1545 */ 1546 TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link); 1547 devfs_alias_propagate(alias, 0); 1548 udev_event_attach(alias->dev_target, alias->name, 1); 1549 } else { 1550 devfs_debug(DEVFS_DEBUG_WARNING, 1551 "Warning: duplicate devfs_make_alias for %s\n", 1552 alias->name); 1553 kfree(alias->name, M_DEVFS); 1554 kfree(alias, M_DEVFS); 1555 } 1556 1557 return 0; 1558 } 1559 1560 /* 1561 * Worker function that delete a given alias from the 1562 * alias list, and propagates the removal to all mount 1563 * points. 
 */
static int
devfs_destroy_alias_worker(struct devfs_alias *alias)
{
	struct devfs_alias *alias2;
	int found = 0;

	/*
	 * Look for a registered alias with the same target whose name
	 * matches the (possibly wildcarded) name of the passed alias.
	 */
	TAILQ_FOREACH(alias2, &devfs_alias_list, link) {
		if (alias->dev_target != alias2->dev_target)
			continue;

		if (devfs_WildCmp(alias->name, alias2->name) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "Warning: devfs_destroy_alias for inexistant alias: %s\n",
		    alias->name);
		/* The template alias passed in is always consumed */
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	} else {
		/*
		 * The alias exists, so we delete it from the alias list.
		 * Both the passed-in template (alias) and the registered
		 * entry (alias2) are freed here.
		 */
		TAILQ_REMOVE(&devfs_alias_list, alias2, link);
		devfs_alias_propagate(alias2, 1);
		udev_event_detach(alias2->dev_target, alias2->name, 1);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
		kfree(alias2->name, M_DEVFS);
		kfree(alias2, M_DEVFS);
	}

	return 0;
}

/*
 * Function that removes and frees all aliases.
 */
static int
devfs_alias_reap(void)
{
	struct devfs_alias *alias, *alias2;

	/* MUTABLE variant: entries are removed while iterating */
	TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) {
		TAILQ_REMOVE(&devfs_alias_list, alias, link);
		kfree(alias->name, M_DEVFS);
		kfree(alias, M_DEVFS);
	}
	return 0;
}

/*
 * Function that removes an alias matching a specific cdev and frees
 * it accordingly.
1622 */ 1623 static int 1624 devfs_alias_remove(cdev_t dev) 1625 { 1626 struct devfs_alias *alias, *alias2; 1627 1628 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { 1629 if (alias->dev_target == dev) { 1630 TAILQ_REMOVE(&devfs_alias_list, alias, link); 1631 udev_event_detach(alias->dev_target, alias->name, 1); 1632 kfree(alias->name, M_DEVFS); 1633 kfree(alias, M_DEVFS); 1634 } 1635 } 1636 return 0; 1637 } 1638 1639 /* 1640 * This function propagates an alias addition or removal to 1641 * all mount points. 1642 */ 1643 static int 1644 devfs_alias_propagate(struct devfs_alias *alias, int remove) 1645 { 1646 struct devfs_mnt_data *mnt; 1647 1648 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 1649 if (remove) { 1650 devfs_destroy_node(mnt->root_node, alias->name); 1651 } else { 1652 devfs_alias_apply(mnt->root_node, alias); 1653 } 1654 } 1655 return 0; 1656 } 1657 1658 /* 1659 * This function is a recursive function iterating through 1660 * all device nodes in the topology and, if applicable, 1661 * creating the relevant alias for a device node. 1662 */ 1663 static int 1664 devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias) 1665 { 1666 struct devfs_node *node1, *node2; 1667 1668 KKASSERT(alias != NULL); 1669 1670 if ((node->node_type == Nroot) || (node->node_type == Ndir)) { 1671 if (node->nchildren > 2) { 1672 TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { 1673 devfs_alias_apply(node1, alias); 1674 } 1675 } 1676 } else { 1677 if (node->d_dev == alias->dev_target) 1678 devfs_alias_create(alias->name, node, 0); 1679 } 1680 return 0; 1681 } 1682 1683 /* 1684 * This function checks if any alias possibly is applicable 1685 * to the given node. If so, the alias is created. 
 */
static int
devfs_alias_check_create(struct devfs_node *node)
{
	struct devfs_alias *alias;

	TAILQ_FOREACH(alias, &devfs_alias_list, link) {
		if (node->d_dev == alias->dev_target)
			devfs_alias_create(alias->name, node, 0);
	}
	return 0;
}

/*
 * This function creates an alias with a given name
 * linking to a given devfs node. It also increments
 * the link count on the target node.
 *
 * Returns 0 on success, 1 if the name already exists in the parent
 * directory or the link node could not be allocated.
 */
int
devfs_alias_create(char *name_orig, struct devfs_node *target, int rule_based)
{
	struct mount *mp = target->mp;
	struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node;
	struct devfs_node *linknode;
	char *create_path = NULL;
	char *name;
	char *name_buf;
	int result = 0;

	/* The caller must hold the global devfs lock */
	KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE);

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);
	/* Split "a/b/c/foo" into path "a/b/c" and leaf name "foo" */
	devfs_resolve_name_path(name_orig, name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);


	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "Node already exists: %s "
			    "(devfs_make_alias_worker)!\n",
			    name);
		result = 1;
		goto done;
	}

	linknode = devfs_allocp(Nlink, name, parent, mp, NULL);
	if (linknode == NULL) {
		result = 1;
		goto done;
	}

	linknode->link_target = target;
	target->nlinks++;

	if (rule_based)
		linknode->flags |= DEVFS_RULE_CREATED;

done:
	kfree(name_buf, M_TEMP);
	return (result);
}

/*
 * This function is called by the core and handles mount point
 * strings. It either calls the relevant worker (devfs_apply_
 * reset_rules_worker) on all mountpoints or only a specific
 * one.
1755 */ 1756 static int 1757 devfs_apply_reset_rules_caller(char *mountto, int apply) 1758 { 1759 struct devfs_mnt_data *mnt; 1760 1761 if (mountto[0] == '*') { 1762 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 1763 devfs_iterate_topology(mnt->root_node, 1764 (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node), 1765 NULL); 1766 } 1767 } else { 1768 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 1769 if (!strcmp(mnt->mp->mnt_stat.f_mntonname, mountto)) { 1770 devfs_iterate_topology(mnt->root_node, 1771 (apply)?(devfs_rule_check_apply):(devfs_rule_reset_node), 1772 NULL); 1773 break; 1774 } 1775 } 1776 } 1777 1778 kfree(mountto, M_DEVFS); 1779 return 0; 1780 } 1781 1782 /* 1783 * This function calls a given callback function for 1784 * every dev node in the devfs dev list. 1785 */ 1786 static int 1787 devfs_scan_callback_worker(devfs_scan_t *callback, void *arg) 1788 { 1789 cdev_t dev, dev1; 1790 struct devfs_alias *alias, *alias1; 1791 1792 TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { 1793 callback(dev->si_name, dev, false, arg); 1794 } 1795 TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias1) { 1796 callback(alias->name, alias->dev_target, true, arg); 1797 } 1798 1799 return 0; 1800 } 1801 1802 /* 1803 * This function tries to resolve a given directory, or if not 1804 * found and creation requested, creates the given directory. 
1805 */ 1806 static struct devfs_node * 1807 devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name, 1808 size_t name_len, int create) 1809 { 1810 struct devfs_node *node, *found = NULL; 1811 1812 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) { 1813 if (name_len != node->d_dir.d_namlen) 1814 continue; 1815 1816 if (!memcmp(dir_name, node->d_dir.d_name, name_len)) { 1817 found = node; 1818 break; 1819 } 1820 } 1821 1822 if ((found == NULL) && (create)) { 1823 found = devfs_allocp(Ndir, dir_name, parent, parent->mp, NULL); 1824 } 1825 1826 return found; 1827 } 1828 1829 /* 1830 * This function tries to resolve a complete path. If creation is requested, 1831 * if a given part of the path cannot be resolved (because it doesn't exist), 1832 * it is created. 1833 */ 1834 struct devfs_node * 1835 devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create) 1836 { 1837 struct devfs_node *node = parent; 1838 char *buf; 1839 size_t idx = 0; 1840 1841 if (path == NULL) 1842 return parent; 1843 1844 buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK); 1845 1846 while (*path && idx < PATH_MAX - 1) { 1847 if (*path != '/') { 1848 buf[idx++] = *path; 1849 } else { 1850 buf[idx] = '\0'; 1851 node = devfs_resolve_or_create_dir(node, buf, idx, create); 1852 if (node == NULL) { 1853 kfree(buf, M_TEMP); 1854 return NULL; 1855 } 1856 idx = 0; 1857 } 1858 ++path; 1859 } 1860 buf[idx] = '\0'; 1861 node = devfs_resolve_or_create_dir(node, buf, idx, create); 1862 kfree (buf, M_TEMP); 1863 return (node); 1864 } 1865 1866 /* 1867 * Takes a full path and strips it into a directory path and a name. 1868 * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It 1869 * requires a working buffer with enough size to keep the whole 1870 * fullpath. 
 */
int
devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep)
{
	char *name = NULL;
	char *path = NULL;
	size_t len = strlen(fullpath) + 1;
	int i;

	KKASSERT((fullpath != NULL) && (buf != NULL));
	KKASSERT((pathp != NULL) && (namep != NULL));

	/* Work on a private copy; the split NUL-terminates the dir part */
	memcpy(buf, fullpath, len);

	/* Find the last '/' and split there */
	for (i = len - 1; i >= 0; i--) {
		if (buf[i] == '/') {
			buf[i] = '\0';
			name = &(buf[i+1]);
			path = buf;
			break;
		}
	}

	/* *pathp stays NULL when the path has no directory component */
	*pathp = path;

	if (name) {
		*namep = name;
	} else {
		/* No '/' at all: the whole buffer is the name */
		*namep = buf;
	}

	return 0;
}

/*
 * This function creates a new devfs node for a given device. It can
 * handle a complete path as device name, and accordingly creates
 * the path and the final device node.
 *
 * The reference count on the passed dev remains unchanged.
 */
struct devfs_node *
devfs_create_device_node(struct devfs_node *root, cdev_t dev,
			 char *dev_name, char *path_fmt, ...)
{
	struct devfs_node *parent, *node = NULL;
	char *path = NULL;
	char *name;
	char *name_buf;
	__va_list ap;
	int i, found;
	char *create_path = NULL;
	char *names = "pqrsPQRS";	/* pty unit letters, see tty check below */

	name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK);

	/* Render the optional printf-style directory prefix */
	if (path_fmt != NULL) {
		__va_start(ap, path_fmt);
		kvasnrprintf(&path, PATH_MAX, 10, path_fmt, ap);
		__va_end(ap);
	}

	parent = devfs_resolve_or_create_path(root, path, 1);
	KKASSERT(parent);

	/* Use the cdev's own name when no explicit dev_name was given */
	devfs_resolve_name_path(
			((dev_name == NULL) && (dev))?(dev->si_name):(dev_name),
			name_buf, &create_path, &name);

	if (create_path)
		parent = devfs_resolve_or_create_path(parent, create_path, 1);


	if (devfs_find_device_node_by_name(parent, name)) {
		devfs_debug(DEVFS_DEBUG_WARNING, "devfs_create_device_node: "
			    "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n",
			    name);
		goto out;
	}

	node = devfs_allocp(Ndev, name, parent, parent->mp, dev);
	nanotime(&parent->mtime);

	/*
	 * Ugly unix98 pty magic, to hide pty master (ptm) devices and their
	 * directory
	 */
	if ((dev) && (strlen(dev->si_name) >= 4) &&
			(!memcmp(dev->si_name, "ptm/", 4))) {
		node->parent->flags |= DEVFS_HIDDEN;
		node->flags |= DEVFS_HIDDEN;
	}

	/*
	 * Ugly pty magic, to tag pty devices as such and hide them if needed.
	 */
	if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3)))
		node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);

	/* ttyp*/ /*...ttyS* slave names are tagged as ptys as well */
	if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) {
		found = 0;
		for (i = 0; i < strlen(names); i++) {
			if (name[3] == names[i]) {
				found = 1;
				break;
			}
		}
		if (found)
			node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE);
	}

out:
	kfree(name_buf, M_TEMP);
	kvasfree(&path);
	return node;
}

/*
 * This function finds a given device node in the topology with a given
 * cdev.
 */
void *
devfs_find_device_node_callback(struct devfs_node *node, cdev_t target)
{
	if ((node->node_type == Ndev) && (node->d_dev == target)) {
		return node;
	}

	return NULL;
}

/*
 * This function finds a device node in the given parent directory by its
 * name and returns it.
2004 */ 2005 struct devfs_node * 2006 devfs_find_device_node_by_name(struct devfs_node *parent, char *target) 2007 { 2008 struct devfs_node *node, *found = NULL; 2009 size_t len = strlen(target); 2010 2011 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) { 2012 if (len != node->d_dir.d_namlen) 2013 continue; 2014 2015 if (!memcmp(node->d_dir.d_name, target, len)) { 2016 found = node; 2017 break; 2018 } 2019 } 2020 2021 return found; 2022 } 2023 2024 static void * 2025 devfs_inode_to_vnode_worker_callback(struct devfs_node *node, ino_t *inop) 2026 { 2027 struct vnode *vp = NULL; 2028 ino_t target = *inop; 2029 2030 if (node->d_dir.d_ino == target) { 2031 if (node->v_node) { 2032 vp = node->v_node; 2033 vget(vp, LK_EXCLUSIVE | LK_RETRY); 2034 vn_unlock(vp); 2035 } else { 2036 devfs_allocv(&vp, node); 2037 vn_unlock(vp); 2038 } 2039 } 2040 2041 return vp; 2042 } 2043 2044 /* 2045 * This function takes a cdev and removes its devfs node in the 2046 * given topology. The cdev remains intact. 2047 */ 2048 int 2049 devfs_destroy_device_node(struct devfs_node *root, cdev_t target) 2050 { 2051 KKASSERT(target != NULL); 2052 return devfs_destroy_node(root, target->si_name); 2053 } 2054 2055 /* 2056 * This function takes a path to a devfs node, resolves it and 2057 * removes the devfs node from the given topology. 
2058 */ 2059 int 2060 devfs_destroy_node(struct devfs_node *root, char *target) 2061 { 2062 struct devfs_node *node, *parent; 2063 char *name; 2064 char *name_buf; 2065 char *create_path = NULL; 2066 2067 KKASSERT(target); 2068 2069 name_buf = kmalloc(PATH_MAX, M_TEMP, M_WAITOK); 2070 ksnprintf(name_buf, PATH_MAX, "%s", target); 2071 2072 devfs_resolve_name_path(target, name_buf, &create_path, &name); 2073 2074 if (create_path) 2075 parent = devfs_resolve_or_create_path(root, create_path, 0); 2076 else 2077 parent = root; 2078 2079 if (parent == NULL) { 2080 kfree(name_buf, M_TEMP); 2081 return 1; 2082 } 2083 2084 node = devfs_find_device_node_by_name(parent, name); 2085 2086 if (node) { 2087 nanotime(&node->parent->mtime); 2088 devfs_gc(node); 2089 } 2090 2091 kfree(name_buf, M_TEMP); 2092 2093 return 0; 2094 } 2095 2096 /* 2097 * Just set perms and ownership for given node. 2098 */ 2099 int 2100 devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid, 2101 u_short mode, u_long flags) 2102 { 2103 node->mode = mode; 2104 node->uid = uid; 2105 node->gid = gid; 2106 2107 return 0; 2108 } 2109 2110 /* 2111 * Propagates a device attach/detach to all mount 2112 * points. Also takes care of automatic alias removal 2113 * for a deleted cdev. 2114 */ 2115 static int 2116 devfs_propagate_dev(cdev_t dev, int attach) 2117 { 2118 struct devfs_mnt_data *mnt; 2119 2120 TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { 2121 if (attach) { 2122 /* Device is being attached */ 2123 devfs_create_device_node(mnt->root_node, dev, 2124 NULL, NULL ); 2125 } else { 2126 /* Device is being detached */ 2127 devfs_alias_remove(dev); 2128 devfs_destroy_device_node(mnt->root_node, dev); 2129 } 2130 } 2131 return 0; 2132 } 2133 2134 /* 2135 * devfs_clone either returns a basename from a complete name by 2136 * returning the length of the name without trailing digits, or, 2137 * if clone != 0, calls the device's clone handler to get a new 2138 * device, which in turn is returned in devp. 
 */
cdev_t
devfs_clone(cdev_t dev, const char *name, size_t len, int mode,
	    struct ucred *cred)
{
	int error;
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		if (chandler->namlen != len)
			continue;
		if ((!memcmp(chandler->name, name, len)) && (chandler->nhandler)) {
			/*
			 * Drop the devfs lock around devfs_config() so the
			 * helper thread can drain pending messages.
			 * NOTE(review): the handler list could in principle
			 * change while the lock is dropped -- confirm that
			 * a chandler removal cannot race us here.
			 */
			lockmgr(&devfs_lock, LK_RELEASE);
			devfs_config();
			lockmgr(&devfs_lock, LK_EXCLUSIVE);

			ap.a_head.a_dev = dev;
			ap.a_dev = NULL;
			ap.a_name = name;
			ap.a_namelen = len;
			ap.a_mode = mode;
			ap.a_cred = cred;
			error = (chandler->nhandler)(&ap);
			if (error)
				continue;

			/* The handler filled in the cloned device */
			return ap.a_dev;
		}
	}

	return NULL;
}


/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	/* A node may be orphaned only once */
	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	/* MUTABLE variant: cleanup removes entries while iterating */
	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		/*
		 * If we are instructed to clean up, we do so.
		 */
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t	ret;

	spin_lock(&ino_lock);
	ret = d_ino++;
	spin_unlock(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.  The returned cdev carries a reference taken here
 * via reference_dev().
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor, struct dev_ops *bops)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);

	sysref_activate(&dev->si_sysref);
	reference_dev(dev);
	/* Clear everything up to (but not including) si_sysref */
	bzero(dev, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_uptime */
	dev->si_lastwrite = 0;		/* time_uptime */

	dev->si_dict = NULL;
	dev->si_parent = NULL;
	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_uminor = minor;
	dev->si_bops = bops;

	/*
	 * Since the disk subsystem is in the way, we need to
	 * propagate the D_CANFREE from bops (and ops) to
	 * si_flags.
	 */
	if (bops && (bops->head.flags & D_CANFREE)) {
		dev->si_flags |= SI_CANFREE;
	} else if (ops->head.flags & D_CANFREE) {
		dev->si_flags |= SI_CANFREE;
	}

	/* If there is a backing device, we reference its ops */
	dev->si_inode = makeudev(
		    devfs_reference_ops((bops)?(bops):(ops)),
		    minor );
	dev->si_umajor = umajor(dev->si_inode);

	return dev;
}

/*
 * sysref terminate callback: final teardown of a cdev whose last
 * reference went away.
 */
static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/* Check if it is locked already. if not, we acquire the devfs lock */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	/*
	 * Make sure the node isn't linked anymore. Otherwise we've screwed
	 * up somewhere, since normal devs are unlinked on the call to
	 * destroy_dev and only-cdevs that have not been used for cloning
	 * are not linked in the first place. only-cdevs used for cloning
	 * will be linked in, too, and should only be destroyed via
	 * destroy_dev, not destroy_only_dev, so we catch that problem, too.
 */
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/* If there is a backing device, we release the backing device's ops */
	devfs_release_ops((dev->si_bops)?(dev->si_bops):(dev->si_ops));

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Dummies for now (individual locks for MPSAFE)
 */
static void
devfs_cdev_lock(cdev_t dev)
{
}

static void
devfs_cdev_unlock(cdev_t dev)
{
}

/*
 * knote filter used for cdevs whose driver has been detached:
 * reports EOF/NODATA so pending kevent consumers drain out.
 */
static int
devfs_detached_filter_eof(struct knote *kn, long hint)
{
	kn->kn_flags |= (EV_EOF | EV_NODATA);
	return (1);
}

static void
devfs_detached_filter_detach(struct knote *kn)
{
	cdev_t dev = (cdev_t)kn->kn_hook;

	knote_remove(&dev->si_kqinfo.ki_note, kn);
}

static struct filterops devfs_detached_filterops =
	{ FILTEROP_ISFD, NULL,
	  devfs_detached_filter_detach,
	  devfs_detached_filter_eof };

/*
 * Delegates knote filter handling responsibility to devfs
 *
 * Any device that implements kqfilter event handling and could be detached
 * or shut down out from under the kevent subsystem must allow devfs to
 * assume responsibility for any knotes it may hold.
 */
void
devfs_assume_knotes(cdev_t dev, struct kqinfo *kqi)
{
	/*
	 * Let kern/kern_event.c do the heavy lifting.
	 */
	knote_assume_knotes(kqi, &dev->si_kqinfo,
			    &devfs_detached_filterops, (void *)dev);

	/*
	 * These should probably be activated individually, but doing so
	 * would require refactoring kq's public in-kernel interface.
	 */
	KNOTE(&dev->si_kqinfo.ki_note, 0);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	/* A dev may be linked into the global list only once */
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list. The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

/*
 * Returns non-zero if the node exists and is not hidden.
 */
int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

/*
 * Takes (or creates) a reference-counted registration of `ops' and
 * returns the unique id assigned to it (used as the udev major).
 */
int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	if (!found) {
		found = kmalloc(sizeof(struct devfs_dev_ops), M_DEVFS, M_WAITOK);
		found->ops = ops;
		found->ref_count = 0;
		TAILQ_INSERT_TAIL(&devfs_dev_ops_list, found, link);
	}

	KKASSERT(found);

	if (found->ref_count == 0) {
		/* First reference: allocate a unique id from the bitmap */
		found->id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (found->id == -1) {
			/* Ran out of unique ids */
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = found->id;
	++found->ref_count;

	return unit;
}

/*
 * Drops a reference on the registration of `ops'; the registration
 * and its id are released when the last reference goes away.
 */
void
devfs_release_ops(struct dev_ops *ops)
{
	struct devfs_dev_ops *found = NULL;
	struct devfs_dev_ops *devops;

	TAILQ_FOREACH(devops, &devfs_dev_ops_list, link) {
		if (devops->ops == ops) {
			found = devops;
			break;
		}
	}

	KKASSERT(found);

	--found->ref_count;

	if (found->ref_count == 0) {
		TAILQ_REMOVE(&devfs_dev_ops_list, found, link);
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), found->id);
		kfree(found, M_DEVFS);
	}
}

/*
 * Wait for asynchronous messages to complete in the devfs helper
 * thread, then return. Do nothing if the helper thread is dead
 * or we are being indirectly called from the helper thread itself.
 */
void
devfs_config(void)
{
	devfs_msg_t msg;

	if (devfs_run && curthread != td_core) {
		msg = devfs_msg_get();
		msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
		devfs_msg_put(msg);
	}
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread. Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");
	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args );

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args );

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args );

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
2544 lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL, 2545 0, -1, "devfs_msg_core"); 2546 while (devfs_run == 0) 2547 lksleep(td_core, &devfs_lock, 0, "devfsc", 0); 2548 lockmgr(&devfs_lock, LK_RELEASE); 2549 2550 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n"); 2551 } 2552 2553 /* 2554 * Called on unload of devfs; takes care of destroying the core 2555 * and the objcaches. Also removes aliases that are no longer needed. 2556 */ 2557 static void 2558 devfs_uninit(void) 2559 { 2560 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n"); 2561 2562 devfs_msg_send(DEVFS_TERMINATE_CORE, NULL); 2563 while (devfs_run) 2564 tsleep(td_core, 0, "devfsc", hz*10); 2565 tsleep(td_core, 0, "devfsc", hz); 2566 2567 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id)); 2568 2569 /* Destroy the objcaches */ 2570 objcache_destroy(devfs_msg_cache); 2571 objcache_destroy(devfs_node_cache); 2572 objcache_destroy(devfs_dev_cache); 2573 2574 devfs_alias_reap(); 2575 } 2576 2577 /* 2578 * This is a sysctl handler to assist userland devname(3) to 2579 * find the device name for a given udev. 
2580 */ 2581 static int 2582 devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS) 2583 { 2584 udev_t udev; 2585 cdev_t found; 2586 int error; 2587 2588 2589 if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t)))) 2590 return (error); 2591 2592 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev); 2593 2594 if (udev == NOUDEV) 2595 return(EINVAL); 2596 2597 if ((found = devfs_find_device_by_udev(udev)) == NULL) 2598 return(ENOENT); 2599 2600 return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1)); 2601 } 2602 2603 2604 SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY, 2605 NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)"); 2606 2607 SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs"); 2608 TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable); 2609 SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable, 2610 0, "Enable DevFS debugging"); 2611 2612 SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, 2613 devfs_init, NULL); 2614 SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, 2615 devfs_uninit, NULL); 2616 2617 /* 2618 * WildCmp() - compare wild string to sane string 2619 * 2620 * Returns 0 on success, -1 on failure. 2621 */ 2622 static int 2623 wildCmp(const char **mary, int d, const char *w, const char *s) 2624 { 2625 int i; 2626 2627 /* 2628 * skip fixed portion 2629 */ 2630 for (;;) { 2631 switch(*w) { 2632 case '*': 2633 /* 2634 * optimize terminator 2635 */ 2636 if (w[1] == 0) 2637 return(0); 2638 if (w[1] != '?' 
&& w[1] != '*') { 2639 /* 2640 * optimize * followed by non-wild 2641 */ 2642 for (i = 0; s + i < mary[d]; ++i) { 2643 if (s[i] == w[1] && wildCmp(mary, d + 1, w + 1, s + i) == 0) 2644 return(0); 2645 } 2646 } else { 2647 /* 2648 * less-optimal 2649 */ 2650 for (i = 0; s + i < mary[d]; ++i) { 2651 if (wildCmp(mary, d + 1, w + 1, s + i) == 0) 2652 return(0); 2653 } 2654 } 2655 mary[d] = s; 2656 return(-1); 2657 case '?': 2658 if (*s == 0) 2659 return(-1); 2660 ++w; 2661 ++s; 2662 break; 2663 default: 2664 if (*w != *s) 2665 return(-1); 2666 if (*w == 0) /* terminator */ 2667 return(0); 2668 ++w; 2669 ++s; 2670 break; 2671 } 2672 } 2673 /* not reached */ 2674 return(-1); 2675 } 2676 2677 2678 /* 2679 * WildCaseCmp() - compare wild string to sane string, case insensitive 2680 * 2681 * Returns 0 on success, -1 on failure. 2682 */ 2683 static int 2684 wildCaseCmp(const char **mary, int d, const char *w, const char *s) 2685 { 2686 int i; 2687 2688 /* 2689 * skip fixed portion 2690 */ 2691 for (;;) { 2692 switch(*w) { 2693 case '*': 2694 /* 2695 * optimize terminator 2696 */ 2697 if (w[1] == 0) 2698 return(0); 2699 if (w[1] != '?' 
&& w[1] != '*') { 2700 /* 2701 * optimize * followed by non-wild 2702 */ 2703 for (i = 0; s + i < mary[d]; ++i) { 2704 if (s[i] == w[1] && wildCaseCmp(mary, d + 1, w + 1, s + i) == 0) 2705 return(0); 2706 } 2707 } else { 2708 /* 2709 * less-optimal 2710 */ 2711 for (i = 0; s + i < mary[d]; ++i) { 2712 if (wildCaseCmp(mary, d + 1, w + 1, s + i) == 0) 2713 return(0); 2714 } 2715 } 2716 mary[d] = s; 2717 return(-1); 2718 case '?': 2719 if (*s == 0) 2720 return(-1); 2721 ++w; 2722 ++s; 2723 break; 2724 default: 2725 if (*w != *s) { 2726 #define tolower(x) ((x >= 'A' && x <= 'Z')?(x+('a'-'A')):(x)) 2727 if (tolower(*w) != tolower(*s)) 2728 return(-1); 2729 } 2730 if (*w == 0) /* terminator */ 2731 return(0); 2732 ++w; 2733 ++s; 2734 break; 2735 } 2736 } 2737 /* not reached */ 2738 return(-1); 2739 } 2740 2741 struct cdev_privdata { 2742 void *cdpd_data; 2743 cdevpriv_dtr_t cdpd_dtr; 2744 }; 2745 2746 int devfs_get_cdevpriv(struct file *fp, void **datap) 2747 { 2748 struct cdev_privdata *p; 2749 int error; 2750 2751 if (fp == NULL) 2752 return(EBADF); 2753 p = (struct cdev_privdata*) fp->f_data1; 2754 if (p != NULL) { 2755 error = 0; 2756 *datap = p->cdpd_data; 2757 } else 2758 error = ENOENT; 2759 return (error); 2760 } 2761 2762 int devfs_set_cdevpriv(struct file *fp, void *priv, cdevpriv_dtr_t dtr) 2763 { 2764 struct cdev_privdata *p; 2765 int error; 2766 2767 if (fp == NULL) 2768 return (ENOENT); 2769 2770 p = kmalloc(sizeof(struct cdev_privdata), M_DEVFS, M_WAITOK); 2771 p->cdpd_data = priv; 2772 p->cdpd_dtr = dtr; 2773 2774 spin_lock(&fp->f_spin); 2775 if (fp->f_data1 == NULL) { 2776 fp->f_data1 = p; 2777 error = 0; 2778 } else 2779 error = EBUSY; 2780 spin_unlock(&fp->f_spin); 2781 2782 if (error) 2783 kfree(p, M_DEVFS); 2784 2785 return error; 2786 } 2787 2788 void devfs_clear_cdevpriv(struct file *fp) 2789 { 2790 struct cdev_privdata *p; 2791 2792 if (fp == NULL) 2793 return; 2794 2795 spin_lock(&fp->f_spin); 2796 p = fp->f_data1; 2797 fp->f_data1 = NULL; 2798 
spin_unlock(&fp->f_spin); 2799 2800 if (p != NULL) { 2801 (p->cdpd_dtr)(p->cdpd_data); 2802 kfree(p, M_DEVFS); 2803 } 2804 } 2805 2806 int 2807 devfs_WildCmp(const char *w, const char *s) 2808 { 2809 int i; 2810 int c; 2811 int slen = strlen(s); 2812 const char **mary; 2813 2814 for (i = c = 0; w[i]; ++i) { 2815 if (w[i] == '*') 2816 ++c; 2817 } 2818 mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK); 2819 for (i = 0; i < c; ++i) 2820 mary[i] = s + slen; 2821 i = wildCmp(mary, 0, w, s); 2822 kfree(mary, M_DEVFS); 2823 return(i); 2824 } 2825 2826 int 2827 devfs_WildCaseCmp(const char *w, const char *s) 2828 { 2829 int i; 2830 int c; 2831 int slen = strlen(s); 2832 const char **mary; 2833 2834 for (i = c = 0; w[i]; ++i) { 2835 if (w[i] == '*') 2836 ++c; 2837 } 2838 mary = kmalloc(sizeof(char *) * (c + 1), M_DEVFS, M_WAITOK); 2839 for (i = 0; i < c; ++i) 2840 mary[i] = s + slen; 2841 i = wildCaseCmp(mary, 0, w, s); 2842 kfree(mary, M_DEVFS); 2843 return(i); 2844 } 2845 2846