121864bc5SMatthew Dillon /* 29f889dc4SMatthew Dillon * (MPSAFE) 39f889dc4SMatthew Dillon * 421864bc5SMatthew Dillon * Copyright (c) 2009 The DragonFly Project. All rights reserved. 521864bc5SMatthew Dillon * 621864bc5SMatthew Dillon * This code is derived from software contributed to The DragonFly Project 721864bc5SMatthew Dillon * by Alex Hornung <ahornung@gmail.com> 821864bc5SMatthew Dillon * 921864bc5SMatthew Dillon * Redistribution and use in source and binary forms, with or without 1021864bc5SMatthew Dillon * modification, are permitted provided that the following conditions 1121864bc5SMatthew Dillon * are met: 1221864bc5SMatthew Dillon * 1321864bc5SMatthew Dillon * 1. Redistributions of source code must retain the above copyright 1421864bc5SMatthew Dillon * notice, this list of conditions and the following disclaimer. 1521864bc5SMatthew Dillon * 2. Redistributions in binary form must reproduce the above copyright 1621864bc5SMatthew Dillon * notice, this list of conditions and the following disclaimer in 1721864bc5SMatthew Dillon * the documentation and/or other materials provided with the 1821864bc5SMatthew Dillon * distribution. 1921864bc5SMatthew Dillon * 3. Neither the name of The DragonFly Project nor the names of its 2021864bc5SMatthew Dillon * contributors may be used to endorse or promote products derived 2121864bc5SMatthew Dillon * from this software without specific, prior written permission. 2221864bc5SMatthew Dillon * 2321864bc5SMatthew Dillon * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 2421864bc5SMatthew Dillon * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 2521864bc5SMatthew Dillon * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 2621864bc5SMatthew Dillon * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE 2721864bc5SMatthew Dillon * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 2821864bc5SMatthew Dillon * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 2921864bc5SMatthew Dillon * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 3021864bc5SMatthew Dillon * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 3121864bc5SMatthew Dillon * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 3221864bc5SMatthew Dillon * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 3321864bc5SMatthew Dillon * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3421864bc5SMatthew Dillon * SUCH DAMAGE. 3521864bc5SMatthew Dillon */ 3621864bc5SMatthew Dillon #include <sys/param.h> 3721864bc5SMatthew Dillon #include <sys/systm.h> 3821864bc5SMatthew Dillon #include <sys/time.h> 3921864bc5SMatthew Dillon #include <sys/kernel.h> 4021864bc5SMatthew Dillon #include <sys/lock.h> 4121864bc5SMatthew Dillon #include <sys/fcntl.h> 4221864bc5SMatthew Dillon #include <sys/proc.h> 4321864bc5SMatthew Dillon #include <sys/priv.h> 4421864bc5SMatthew Dillon #include <sys/signalvar.h> 4521864bc5SMatthew Dillon #include <sys/vnode.h> 4621864bc5SMatthew Dillon #include <sys/uio.h> 4721864bc5SMatthew Dillon #include <sys/mount.h> 4821864bc5SMatthew Dillon #include <sys/file.h> 4921864bc5SMatthew Dillon #include <sys/dirent.h> 5021864bc5SMatthew Dillon #include <sys/malloc.h> 5121864bc5SMatthew Dillon #include <sys/stat.h> 5221864bc5SMatthew Dillon #include <sys/reg.h> 5321864bc5SMatthew Dillon #include <vm/vm_pager.h> 5421864bc5SMatthew Dillon #include <vm/vm_zone.h> 5521864bc5SMatthew Dillon #include <vm/vm_object.h> 5621864bc5SMatthew Dillon #include <sys/filio.h> 5721864bc5SMatthew Dillon #include <sys/ttycom.h> 5821864bc5SMatthew Dillon #include <sys/tty.h> 592d076755SAlex Hornung #include <sys/diskslice.h> 603a1032a6SAlex Hornung #include <sys/sysctl.h> 
612c1e28ddSAlex Hornung #include <sys/devfs.h> 6221864bc5SMatthew Dillon #include <sys/pioctl.h> 63c705e298SSamuel J. Greear #include <vfs/fifofs/fifo.h> 6421864bc5SMatthew Dillon 6521864bc5SMatthew Dillon #include <machine/limits.h> 66684a93c4SMatthew Dillon 671a54183bSMatthew Dillon #include <sys/buf2.h> 68684a93c4SMatthew Dillon #include <vm/vm_page2.h> 6921864bc5SMatthew Dillon 70752b2d38SSascha Wildner #ifndef SPEC_CHAIN_DEBUG 71752b2d38SSascha Wildner #define SPEC_CHAIN_DEBUG 0 72752b2d38SSascha Wildner #endif 73752b2d38SSascha Wildner 7421864bc5SMatthew Dillon MALLOC_DECLARE(M_DEVFS); 759f889dc4SMatthew Dillon #define DEVFS_BADOP (void *)devfs_vop_badop 7621864bc5SMatthew Dillon 779f889dc4SMatthew Dillon static int devfs_vop_badop(struct vop_generic_args *); 789f889dc4SMatthew Dillon static int devfs_vop_access(struct vop_access_args *); 799f889dc4SMatthew Dillon static int devfs_vop_inactive(struct vop_inactive_args *); 809f889dc4SMatthew Dillon static int devfs_vop_reclaim(struct vop_reclaim_args *); 819f889dc4SMatthew Dillon static int devfs_vop_readdir(struct vop_readdir_args *); 829f889dc4SMatthew Dillon static int devfs_vop_getattr(struct vop_getattr_args *); 839f889dc4SMatthew Dillon static int devfs_vop_setattr(struct vop_setattr_args *); 849f889dc4SMatthew Dillon static int devfs_vop_readlink(struct vop_readlink_args *); 859f889dc4SMatthew Dillon static int devfs_vop_print(struct vop_print_args *); 8621864bc5SMatthew Dillon 879f889dc4SMatthew Dillon static int devfs_vop_nresolve(struct vop_nresolve_args *); 889f889dc4SMatthew Dillon static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *); 899f889dc4SMatthew Dillon static int devfs_vop_nmkdir(struct vop_nmkdir_args *); 909f889dc4SMatthew Dillon static int devfs_vop_nsymlink(struct vop_nsymlink_args *); 919f889dc4SMatthew Dillon static int devfs_vop_nrmdir(struct vop_nrmdir_args *); 929f889dc4SMatthew Dillon static int devfs_vop_nremove(struct vop_nremove_args *); 9321864bc5SMatthew Dillon 
9421864bc5SMatthew Dillon static int devfs_spec_open(struct vop_open_args *); 9521864bc5SMatthew Dillon static int devfs_spec_close(struct vop_close_args *); 9621864bc5SMatthew Dillon static int devfs_spec_fsync(struct vop_fsync_args *); 9721864bc5SMatthew Dillon 9821864bc5SMatthew Dillon static int devfs_spec_read(struct vop_read_args *); 9921864bc5SMatthew Dillon static int devfs_spec_write(struct vop_write_args *); 10021864bc5SMatthew Dillon static int devfs_spec_ioctl(struct vop_ioctl_args *); 10121864bc5SMatthew Dillon static int devfs_spec_kqfilter(struct vop_kqfilter_args *); 10221864bc5SMatthew Dillon static int devfs_spec_strategy(struct vop_strategy_args *); 10321864bc5SMatthew Dillon static void devfs_spec_strategy_done(struct bio *); 10421864bc5SMatthew Dillon static int devfs_spec_freeblks(struct vop_freeblks_args *); 10521864bc5SMatthew Dillon static int devfs_spec_bmap(struct vop_bmap_args *); 10621864bc5SMatthew Dillon static int devfs_spec_advlock(struct vop_advlock_args *); 10721864bc5SMatthew Dillon static void devfs_spec_getpages_iodone(struct bio *); 10821864bc5SMatthew Dillon static int devfs_spec_getpages(struct vop_getpages_args *); 10921864bc5SMatthew Dillon 1109f889dc4SMatthew Dillon static int devfs_fo_close(struct file *); 1119f889dc4SMatthew Dillon static int devfs_fo_read(struct file *, struct uio *, struct ucred *, int); 1129f889dc4SMatthew Dillon static int devfs_fo_write(struct file *, struct uio *, struct ucred *, int); 1139f889dc4SMatthew Dillon static int devfs_fo_stat(struct file *, struct stat *, struct ucred *); 1149f889dc4SMatthew Dillon static int devfs_fo_kqfilter(struct file *, struct knote *); 1159f889dc4SMatthew Dillon static int devfs_fo_ioctl(struct file *, u_long, caddr_t, 11687baaf0cSMatthew Dillon struct ucred *, struct sysmsg *); 11721864bc5SMatthew Dillon static __inline int sequential_heuristic(struct uio *, struct file *); 11887baaf0cSMatthew Dillon 11921864bc5SMatthew Dillon extern struct lock devfs_lock; 

/*
 * devfs vnode operations for regular files.  All vnode ops are MPSAFE.
 *
 * Operations that make no sense on a plain devfs node (read/write,
 * advlock, bmap, ioctl, mknod, link, rename, create) are wired to
 * DEVFS_BADOP, which fails with EIO.
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_vop_getattr,
	.vop_inactive =		devfs_vop_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_vop_nresolve,
	.vop_nlookupdotdot =	devfs_vop_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		devfs_vop_nmkdir,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_vop_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		devfs_vop_nrmdir,
	.vop_nsymlink =		devfs_vop_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_vop_readdir,
	.vop_readlink =		devfs_vop_readlink,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices.  All vnode ops are MPSAFE.
 *
 * I/O-type operations (read/write/ioctl/strategy/...) are forwarded to the
 * devfs_spec_* layer, which dispatches to the underlying cdev; directory-type
 * operations are invalid on a device node and fail via DEVFS_BADOP.
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_vop_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_vop_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};

/*
 * devfs file pointer operations.  All fileops are MPSAFE.
 */
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_fo_read,
	.fo_write =	devfs_fo_write,
	.fo_ioctl =	devfs_fo_ioctl,
	.fo_kqfilter =	devfs_fo_kqfilter,
	.fo_stat =	devfs_fo_stat,
	.fo_close =	devfs_fo_close,
	.fo_shutdown =	nofo_shutdown
};

/*
 * These two functions are possibly temporary hacks for devices (aka
 * the pty code) which want to control the node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 *
 * MPSAFE - sorta.  Theoretically the overwrite can compete since they
 * are loading from the same fields.
 */

/*
 * Pull uid/gid/perms from the cdev into the devfs_node when the device
 * has asserted SI_OVERRIDE (i.e. the driver owns the attributes).
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

/*
 * Push uid/gid/perms from the devfs_node back into the cdev, again only
 * when the device has asserted SI_OVERRIDE.  Inverse of node_sync_dev_get().
 */
static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}

/*
 * generic entry point for unsupported operations (always fails with EIO)
 */
static int
devfs_vop_badop(struct vop_generic_args *ap)
{
	return (EIO);
}

/*
 * vop_access() - check access permissions against the node's uid/gid/mode.
 *
 * The node's attributes are first refreshed from the cdev (SI_OVERRIDE
 * case) before being handed to the generic helper.
 */
static int
devfs_vop_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}

/*
 * vop_inactive() - ask for the vnode to be recycled when the backing
 * devfs_node is gone or no longer linked into the topology.
 */
static int
devfs_vop_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}

/*
 * vop_reclaim() - disassociate the vnode from its devfs_node and release
 * the vnode's device association.  May be entered with devfs_lock already
 * held exclusively by the caller.
 */
static int
devfs_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if it is locked already. if not, we acquire the devfs lock
	 */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.  Interlocked by devfs_lock.  However, be careful
	 * interposing other operations between cleaning out v_data and
	 * devfs_freep() as the node is only protected by devfs_lock
	 * once the vnode is disassociated.
	 */
	vp = ap->a_vp;
	node = DEVFS_NODE(vp);

	if (node) {
		/* A back-pointer mismatch indicates corrupted state; warn. */
		if (node->v_node != vp) {
			kprintf("NODE->V_NODE MISMATCH VP=%p NODEVP=%p\n",
				vp, node->v_node);
		}
		vp->v_data = NULL;
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}
	/*
	 * v_rdev needs to be properly released using v_release_rdev.
	 * v_data has been cleared above.
	 */
	v_release_rdev(vp);

	/* Only drop devfs_lock if we acquired it ourselves above. */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	return 0;
}

/*
 * vop_readdir() - emit "." and ".." followed by all visible children of
 * the directory node, skipping DEVFS_HIDDEN/DEVFS_INVISIBLE entries and
 * links whose target is hidden.  Fills in the optional seek cookies.
 *
 * The vnode is locked for the duration and devfs_lock is held exclusively
 * while the child list is walked.
 */
static int
devfs_vop_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		/*
		 * NOTE: always allocates the full 256-entry array even when
		 * ncookies was clamped to something smaller above.
		 */
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	/* Reading the directory counts as an access. */
	vfs_timestamp(&dnode->atime);

	/* Offset 0 is the "." entry. */
	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	/* Offset 1 is the ".." entry; the root points ".." at itself. */
	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Nlink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		/* Skip entries already consumed by a previous pass. */
		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		/* Total failure: hand nothing back to the caller. */
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		/* Caller takes ownership of the cookie array. */
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}

/*
 * vop_nresolve() - resolve a name within a devfs directory.  Nlink nodes
 * are chased to their targets with a depth limit of 8 to avoid recursive
 * link loops.  Hidden targets resolve to ENOENT.
 */
static int
devfs_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* Only root and directory nodes can contain entries. */
	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	/* Linear scan of the directory's child list for an exact match. */
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		/* Follow link chains, bounded to depth 8. */
		depth = 0;
		while ((found->node_type == Nlink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		/* Record the negative hit in the namecache. */
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;

	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}

/*
 * vop_nlookupdotdot() - return the parent directory's vnode, or ENOENT
 * if there is no parent (or the node is inaccessible).
 */
static int
devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}

/*
 * getattr() - Does not need a lock since the vp is refd
 */
static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif

	/*
	 * XXX This is a temporary hack to prevent crashes when the device is
	 * being destroyed (and so the underlying node will be gone) while
	 * a userland program is blocked in a read().
	 */
	if (node == NULL)
		return EIO;

	/* Refresh attributes from the cdev if the driver overrides them. */
	node_sync_dev_get(node);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	/* For device nodes report the user-visible minor number. */
	if ((node->node_type == Ndev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			/* Ioctl failure is not fatal; report size 0. */
			vap->va_size = 0;
			error = 0;
		}
	}

	return (error);
}

/*
 * vop_setattr() - apply ownership and mode changes through the generic
 * chown/chmod helpers, then propagate the result back to the cdev
 * (node_sync_dev_set) and stamp ctime.
 */
static int
devfs_vop_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	uid_t cur_uid;
	gid_t cur_gid;
	mode_t cur_mode;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	vap = ap->a_vap;

	if ((vap->va_uid != (uid_t)VNOVAL) || (vap->va_gid != (gid_t)VNOVAL)) {
		cur_uid = node->uid;
		cur_gid = node->gid;
		cur_mode = node->mode;
		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
		    ap->a_cred, &cur_uid, &cur_gid, &cur_mode);
		if (error)
			goto out;

		if (node->uid != cur_uid || node->gid != cur_gid) {
			node->uid = cur_uid;
			node->gid = cur_gid;
			node->mode = cur_mode;
		}
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		cur_mode = node->mode;
		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
		    node->uid, node->gid, &cur_mode);
		if (error == 0 && node->mode != cur_mode) {
			node->mode = cur_mode;
		}
	}

out:
	/* Push the (possibly updated) attributes back to the cdev. */
	node_sync_dev_set(node);
	vfs_timestamp(&node->ctime);

	return error;
}

/*
 * vop_readlink() - copy the symlink target string out to the caller.
 * devfs_lock is held shared to stabilize symlink_name/symlink_namelen
 * during the uiomove.
 */
static int
devfs_vop_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_SHARED);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


/*
 * devfs vnode print: no-op diagnostic hook; devfs prints nothing.
 */
static int
devfs_vop_print(struct vop_print_args *ap)
{
	return (0);
}

/*
 * devfs nmkdir: create a user directory node inside a devfs directory.
 * Only permitted when the parent is the root node or another directory.
 * The new node is flagged DEVFS_USER_CREATED so that only such nodes
 * can later be removed with nrmdir/nremove.
 *
 * Returns 0 on success; ENOTDIR when no vnode was allocated (note this
 * is also the error returned when the parent type check fails).
 */
static int
devfs_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

/*
 * devfs nsymlink: create a user symlink node inside a devfs directory.
 * The target string is duplicated into devfs-owned memory
 * (M_DEVFS, NUL-terminated) and the node is flagged DEVFS_USER_CREATED.
 *
 * Returns 0 on success; ENOTDIR when no vnode was allocated (also used
 * when the parent type check fails).
 */
static int
devfs_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

/*
 * devfs nrmdir: remove a user-created directory node by name.
 * Refuses to remove nodes devfs created itself (EPERM),
 * non-directories (ENOTDIR), and non-empty directories (ENOTEMPTY;
 * the nchildren > 2 threshold presumably accounts for the "." and
 * ".." entries -- TODO confirm against devfs_node bookkeeping).
 *
 * Returns ENOENT if no matching entry is found.
 */
static int
devfs_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	/* Linear scan of the parent directory for a name match */
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created dirs
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type != Ndir) {
			error = ENOTDIR;
			goto out;
		} else if (node->nchildren > 2) {
			error = ENOTEMPTY;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

/*
 * devfs nremove: remove a user-created non-directory node (e.g. a
 * symlink) by name.  Structure parallels devfs_vop_nrmdir above.
 * Returns ENOENT if no match, EPERM for devfs-owned nodes, EISDIR
 * for directories.
 */
static int
devfs_vop_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g.
		 * symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type == Ndir) {
			error = EISDIR;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


/*
 * devfs spec open: open the character device behind a devfs vnode.
 *
 * Handles, in order: the D_QUICK fast path (device opened with the
 * shared vnode lock retained), per-open device cloning via
 * devfs_clone() (in which case ap->a_vp is replaced by a vnode for
 * the cloned device and orig_vp remembers the original), the actual
 * dev_dopen() call, securelevel/mounted-R/W restrictions for disk
 * devices, tty t_stop setup, VM object initialization for disks, and
 * the pty visibility hack.
 */
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	/*
	 * Simple devices that don't care.  Retain the shared lock.
	 */
	if (dev_dflags(dev) & D_QUICK) {
		vn_unlock(vp);
		error = dev_dopen(dev, ap->a_mode, S_IFCHR,
				  ap->a_cred, ap->a_fp, vp);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		vop_stdopen(ap);
		goto skip;
	}

	/*
	 * Slow code
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if (node && ap->a_fp) {
		int exists;

		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_SHARED);

		/*
		 * Ask the device whether this open should be redirected to
		 * a per-open clone (e.g. cloning devices).
		 */
		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			/* need the exclusive devfs lock to create the node */
			lockmgr(&devfs_lock, LK_RELEASE);
			lockmgr(&devfs_lock, LK_EXCLUSIVE);
			newnode = devfs_create_device_node(
					DEVFS_MNTDATA(vp->v_mount)->root_node,
					ndev, &exists, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				if (exists == 0)
					devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Nroot) ?
					    "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);

		/*
		 * Synchronize devfs here to make sure that, if the cloned
		 * device creates other device nodes in addition to the
		 * cloned one, all of them are created by the time we return
		 * from opening the cloned one.
		 */
		if (ndev)
			devfs_config();
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 *
	 * NOTE: Shared vnode lock probably held, but its ok as long
	 *	 as assignments are consistent.
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS,64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open the underlying device
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp, vp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will be only allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {

		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	/*
	 * NOTE: vnode is still locked shared.  t_stop assignment should
	 *	 remain consistent so we should be ok.
	 */
	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	/*
	 * NOTE: vnode is still locked shared.  assignments should
	 *	 remain consistent so we should be ok.  However,
	 *	 upgrade to exclusive if we need a VM object.
	 */
	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		vfs_timestamp(&node->atime);
#endif
	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY) {
		if (node->flags & DEVFS_INVISIBLE)
			node->flags &= ~DEVFS_INVISIBLE;
	}

skip:
	if (ap->a_fp) {
		/* Route further file ops through the devfs fileops table */
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}

	return 0;
}

/*
 * devfs spec close: close the character device behind a devfs vnode.
 *
 * dev_dclose() is only issued when the vnode is being reclaimed, the
 * device requested close tracking (D_TRACKCLOSE), or this is the last
 * open on the device.  D_QUICK devices take a short path that keeps
 * the shared vnode lock; everything else upgrades to exclusive for
 * the opencount tests (which are flagged XXX not SMP safe).
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;
	int opencount;

	/*
	 * Devices flagged D_QUICK require no special handling.
	 */
	if (dev && dev_dflags(dev) & D_QUICK) {
		opencount = vp->v_opencount;
		if (opencount <= 1)
			opencount = count_dev(dev);	/* XXX NOT SMP SAFE */
		if (((vp->v_flag & VRECLAIMED) ||
		    (dev_dflags(dev) & D_TRACKCLOSE) ||
		    (opencount == 1))) {
			vn_unlock(vp);
			error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
			vn_lock(vp, LK_SHARED | LK_RETRY);
		}
		goto skip;
	}

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 *
	 * XXX opencount is not SMP safe.  The vnode is locked but there
	 *     may be multiple vnodes referencing the same device.
	 */
	if (dev) {
		/*
		 * NOTE: Try to avoid global tokens when testing opencount
		 * XXX hack, fixme. needs a struct lock and opencount in
		 * struct cdev itself.
		 */
		reference_dev(dev);
		opencount = vp->v_opencount;
		if (opencount <= 1)
			opencount = count_dev(dev);	/* XXX NOT SMP SAFE */
	} else {
		opencount = 0;
	}

	/* Half-close of a controlling terminal: detach it from the session */
	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
					LK_RETRY |
					LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
skip:
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}


/*
 * devfs file close: detach the fileops (so further ops fail with
 * badfileops), close the vnode, and release any per-open cdevpriv
 * state attached to the file pointer.
 */
static int
devfs_fo_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag, fp);
	devfs_clear_cdevpriv(fp);

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
	      struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* Hold a ref on the device across the I/O */
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	/* Translate file flags into I/O flags for the device */
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node)
		vfs_timestamp(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}


/*
 * Device-optimized file table vnode write routine; mirrors
 * devfs_fo_read but additionally handles append, sync and
 * MNT_SYNCHRONOUS flags, and updates both atime and mtime.
 */
static int
devfs_fo_write(struct file *fp, struct uio *uio,
	       struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node) {
		vfs_timestamp(&node->atime);
		vfs_timestamp(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}


/*
 * devfs file stat: runs the generic vn_stat() then overlays fields
 * from VOP_GETATTR().  (Function continues past this view.)
 */
static int
devfs_fo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ...
or not in case it's a cloned device 139221864bc5SMatthew Dillon */ 139321864bc5SMatthew Dillon if (vap->va_fsid != VNOVAL) 139421864bc5SMatthew Dillon sb->st_dev = vap->va_fsid; 139521864bc5SMatthew Dillon else 139621864bc5SMatthew Dillon sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; 139721864bc5SMatthew Dillon 139821864bc5SMatthew Dillon sb->st_ino = vap->va_fileid; 139921864bc5SMatthew Dillon 140021864bc5SMatthew Dillon mode = vap->va_mode; 140121864bc5SMatthew Dillon mode |= S_IFCHR; 140221864bc5SMatthew Dillon sb->st_mode = mode; 140321864bc5SMatthew Dillon 140421864bc5SMatthew Dillon if (vap->va_nlink > (nlink_t)-1) 140521864bc5SMatthew Dillon sb->st_nlink = (nlink_t)-1; 140621864bc5SMatthew Dillon else 140721864bc5SMatthew Dillon sb->st_nlink = vap->va_nlink; 14083a1032a6SAlex Hornung 140921864bc5SMatthew Dillon sb->st_uid = vap->va_uid; 141021864bc5SMatthew Dillon sb->st_gid = vap->va_gid; 14112ac7d105SSascha Wildner sb->st_rdev = devid_from_dev(DEVFS_NODE(vp)->d_dev); 14122d076755SAlex Hornung sb->st_size = vap->va_bytes; 141321864bc5SMatthew Dillon sb->st_atimespec = vap->va_atime; 141421864bc5SMatthew Dillon sb->st_mtimespec = vap->va_mtime; 141521864bc5SMatthew Dillon sb->st_ctimespec = vap->va_ctime; 141621864bc5SMatthew Dillon 141721864bc5SMatthew Dillon /* 141821864bc5SMatthew Dillon * A VCHR and VBLK device may track the last access and last modified 141921864bc5SMatthew Dillon * time independantly of the filesystem. This is particularly true 142021864bc5SMatthew Dillon * because device read and write calls may bypass the filesystem. 
142121864bc5SMatthew Dillon */ 142221864bc5SMatthew Dillon if (vp->v_type == VCHR || vp->v_type == VBLK) { 142321864bc5SMatthew Dillon dev = vp->v_rdev; 142421864bc5SMatthew Dillon if (dev != NULL) { 142521864bc5SMatthew Dillon if (dev->si_lastread) { 1426cec73927SMatthew Dillon sb->st_atimespec.tv_sec = time_second + 1427cec73927SMatthew Dillon (time_uptime - 1428cec73927SMatthew Dillon dev->si_lastread); 142921864bc5SMatthew Dillon sb->st_atimespec.tv_nsec = 0; 143021864bc5SMatthew Dillon } 143121864bc5SMatthew Dillon if (dev->si_lastwrite) { 1432cec73927SMatthew Dillon sb->st_atimespec.tv_sec = time_second + 1433cec73927SMatthew Dillon (time_uptime - 1434cec73927SMatthew Dillon dev->si_lastwrite); 143521864bc5SMatthew Dillon sb->st_atimespec.tv_nsec = 0; 143621864bc5SMatthew Dillon } 143721864bc5SMatthew Dillon } 143821864bc5SMatthew Dillon } 143921864bc5SMatthew Dillon 144021864bc5SMatthew Dillon /* 144121864bc5SMatthew Dillon * According to www.opengroup.org, the meaning of st_blksize is 144221864bc5SMatthew Dillon * "a filesystem-specific preferred I/O block size for this 144321864bc5SMatthew Dillon * object. In some filesystem types, this may vary from file 144421864bc5SMatthew Dillon * to file" 144521864bc5SMatthew Dillon * Default to PAGE_SIZE after much discussion. 
144621864bc5SMatthew Dillon */ 144721864bc5SMatthew Dillon 144821864bc5SMatthew Dillon sb->st_blksize = PAGE_SIZE; 144921864bc5SMatthew Dillon 145021864bc5SMatthew Dillon sb->st_flags = vap->va_flags; 145121864bc5SMatthew Dillon 145221864bc5SMatthew Dillon error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0); 145321864bc5SMatthew Dillon if (error) 145421864bc5SMatthew Dillon sb->st_gen = 0; 145521864bc5SMatthew Dillon else 145621864bc5SMatthew Dillon sb->st_gen = (u_int32_t)vap->va_gen; 145721864bc5SMatthew Dillon 145821864bc5SMatthew Dillon sb->st_blocks = vap->va_bytes / S_BLKSIZE; 145921864bc5SMatthew Dillon 146034c6728eSMatthew Dillon /* 146134c6728eSMatthew Dillon * This is for ABI compatibility <= 5.7 (for ABI change made in 146234c6728eSMatthew Dillon * 5.7 master). 146334c6728eSMatthew Dillon */ 146434c6728eSMatthew Dillon sb->__old_st_blksize = sb->st_blksize; 146534c6728eSMatthew Dillon 146621864bc5SMatthew Dillon return (0); 146721864bc5SMatthew Dillon } 146821864bc5SMatthew Dillon 146921864bc5SMatthew Dillon 147021864bc5SMatthew Dillon static int 14719f889dc4SMatthew Dillon devfs_fo_kqfilter(struct file *fp, struct knote *kn) 147221864bc5SMatthew Dillon { 147321864bc5SMatthew Dillon struct vnode *vp; 147421864bc5SMatthew Dillon int error; 147521864bc5SMatthew Dillon cdev_t dev; 147621864bc5SMatthew Dillon 147721864bc5SMatthew Dillon vp = (struct vnode *)fp->f_data; 147821864bc5SMatthew Dillon if (vp == NULL || vp->v_type == VBAD) { 147921864bc5SMatthew Dillon error = EBADF; 148021864bc5SMatthew Dillon goto done; 148121864bc5SMatthew Dillon } 148221864bc5SMatthew Dillon if ((dev = vp->v_rdev) == NULL) { 148321864bc5SMatthew Dillon error = EBADF; 148421864bc5SMatthew Dillon goto done; 148521864bc5SMatthew Dillon } 148621864bc5SMatthew Dillon reference_dev(dev); 148721864bc5SMatthew Dillon 14888c530b23SJohannes Hofmann error = dev_dkqfilter(dev, kn, fp); 148921864bc5SMatthew Dillon 149021864bc5SMatthew Dillon release_dev(dev); 149121864bc5SMatthew Dillon 
149221864bc5SMatthew Dillon done: 1493b287d649SMatthew Dillon return (error); 149421864bc5SMatthew Dillon } 149521864bc5SMatthew Dillon 149621864bc5SMatthew Dillon static int 14979f889dc4SMatthew Dillon devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data, 149887baaf0cSMatthew Dillon struct ucred *ucred, struct sysmsg *msg) 149921864bc5SMatthew Dillon { 15001d0de3d3SSascha Wildner #if 0 1501898c91eeSMatthew Dillon struct devfs_node *node; 15021d0de3d3SSascha Wildner #endif 1503898c91eeSMatthew Dillon struct vnode *vp; 150421864bc5SMatthew Dillon struct vnode *ovp; 150521864bc5SMatthew Dillon cdev_t dev; 150621864bc5SMatthew Dillon int error; 150721864bc5SMatthew Dillon struct fiodname_args *name_args; 150821864bc5SMatthew Dillon size_t namlen; 150921864bc5SMatthew Dillon const char *name; 151021864bc5SMatthew Dillon 1511898c91eeSMatthew Dillon vp = ((struct vnode *)fp->f_data); 15123a1032a6SAlex Hornung 15133a1032a6SAlex Hornung if ((dev = vp->v_rdev) == NULL) 15143a1032a6SAlex Hornung return EBADF; /* device was revoked */ 15153a1032a6SAlex Hornung 15163a1032a6SAlex Hornung reference_dev(dev); 151721864bc5SMatthew Dillon 15181d0de3d3SSascha Wildner #if 0 1519898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 15201d0de3d3SSascha Wildner #endif 1521898c91eeSMatthew Dillon 1522898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 15239f889dc4SMatthew Dillon "devfs_fo_ioctl() called! 
for dev %s\n", 1524898c91eeSMatthew Dillon dev->si_name); 152521864bc5SMatthew Dillon 152621864bc5SMatthew Dillon if (com == FIODTYPE) { 152721864bc5SMatthew Dillon *(int *)data = dev_dflags(dev) & D_TYPEMASK; 152821864bc5SMatthew Dillon error = 0; 152921864bc5SMatthew Dillon goto out; 153021864bc5SMatthew Dillon } else if (com == FIODNAME) { 153121864bc5SMatthew Dillon name_args = (struct fiodname_args *)data; 153221864bc5SMatthew Dillon name = dev->si_name; 153321864bc5SMatthew Dillon namlen = strlen(name) + 1; 153421864bc5SMatthew Dillon 1535898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1536898c91eeSMatthew Dillon "ioctl, got: FIODNAME for %s\n", name); 153721864bc5SMatthew Dillon 153821864bc5SMatthew Dillon if (namlen <= name_args->len) 153921864bc5SMatthew Dillon error = copyout(dev->si_name, name_args->name, namlen); 154021864bc5SMatthew Dillon else 154121864bc5SMatthew Dillon error = EINVAL; 154221864bc5SMatthew Dillon 1543898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1544898c91eeSMatthew Dillon "ioctl stuff: error: %d\n", error); 154521864bc5SMatthew Dillon goto out; 154621864bc5SMatthew Dillon } 15473a1032a6SAlex Hornung 15488c530b23SJohannes Hofmann error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp); 15493a1032a6SAlex Hornung 155007dfa375SAlex Hornung #if 0 1551898c91eeSMatthew Dillon if (node) { 1552d489a79aSMatthew Dillon vfs_timestamp(&node->atime); 1553d489a79aSMatthew Dillon vfs_timestamp(&node->mtime); 155421864bc5SMatthew Dillon } 155507dfa375SAlex Hornung #endif 1556898c91eeSMatthew Dillon if (com == TIOCSCTTY) { 1557898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 15589f889dc4SMatthew Dillon "devfs_fo_ioctl: got TIOCSCTTY on %s\n", 1559898c91eeSMatthew Dillon dev->si_name); 1560898c91eeSMatthew Dillon } 156121864bc5SMatthew Dillon if (error == 0 && com == TIOCSCTTY) { 156221864bc5SMatthew Dillon struct proc *p = curthread->td_proc; 156321864bc5SMatthew Dillon struct session *sess; 1564898c91eeSMatthew Dillon 
1565898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 15669f889dc4SMatthew Dillon "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n", 1567898c91eeSMatthew Dillon dev->si_name); 156821864bc5SMatthew Dillon if (p == NULL) { 156921864bc5SMatthew Dillon error = ENOTTY; 157021864bc5SMatthew Dillon goto out; 157121864bc5SMatthew Dillon } 157221864bc5SMatthew Dillon sess = p->p_session; 1573898c91eeSMatthew Dillon 1574898c91eeSMatthew Dillon /* 1575898c91eeSMatthew Dillon * Do nothing if reassigning same control tty 1576898c91eeSMatthew Dillon */ 157721864bc5SMatthew Dillon if (sess->s_ttyvp == vp) { 157821864bc5SMatthew Dillon error = 0; 157921864bc5SMatthew Dillon goto out; 158021864bc5SMatthew Dillon } 1581898c91eeSMatthew Dillon 1582898c91eeSMatthew Dillon /* 1583898c91eeSMatthew Dillon * Get rid of reference to old control tty 1584898c91eeSMatthew Dillon */ 158521864bc5SMatthew Dillon ovp = sess->s_ttyvp; 158621864bc5SMatthew Dillon vref(vp); 158721864bc5SMatthew Dillon sess->s_ttyvp = vp; 158821864bc5SMatthew Dillon if (ovp) 158921864bc5SMatthew Dillon vrele(ovp); 159021864bc5SMatthew Dillon } 159121864bc5SMatthew Dillon 159221864bc5SMatthew Dillon out: 15933a1032a6SAlex Hornung release_dev(dev); 15949f889dc4SMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n"); 159521864bc5SMatthew Dillon return (error); 159621864bc5SMatthew Dillon } 159721864bc5SMatthew Dillon 159821864bc5SMatthew Dillon 159921864bc5SMatthew Dillon static int 160021864bc5SMatthew Dillon devfs_spec_fsync(struct vop_fsync_args *ap) 160121864bc5SMatthew Dillon { 160221864bc5SMatthew Dillon struct vnode *vp = ap->a_vp; 160321864bc5SMatthew Dillon int error; 160421864bc5SMatthew Dillon 160521864bc5SMatthew Dillon if (!vn_isdisk(vp, NULL)) 160621864bc5SMatthew Dillon return (0); 160721864bc5SMatthew Dillon 160821864bc5SMatthew Dillon /* 160921864bc5SMatthew Dillon * Flush all dirty buffers associated with a block device. 
161021864bc5SMatthew Dillon */ 161121864bc5SMatthew Dillon error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL); 161221864bc5SMatthew Dillon return (error); 161321864bc5SMatthew Dillon } 161421864bc5SMatthew Dillon 161521864bc5SMatthew Dillon static int 161621864bc5SMatthew Dillon devfs_spec_read(struct vop_read_args *ap) 161721864bc5SMatthew Dillon { 1618898c91eeSMatthew Dillon struct devfs_node *node; 161921864bc5SMatthew Dillon struct vnode *vp; 162021864bc5SMatthew Dillon struct uio *uio; 162121864bc5SMatthew Dillon cdev_t dev; 162221864bc5SMatthew Dillon int error; 162321864bc5SMatthew Dillon 162421864bc5SMatthew Dillon vp = ap->a_vp; 162521864bc5SMatthew Dillon dev = vp->v_rdev; 162621864bc5SMatthew Dillon uio = ap->a_uio; 1627898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 162821864bc5SMatthew Dillon 162921864bc5SMatthew Dillon if (dev == NULL) /* device was revoked */ 163021864bc5SMatthew Dillon return (EBADF); 163121864bc5SMatthew Dillon if (uio->uio_resid == 0) 163221864bc5SMatthew Dillon return (0); 163321864bc5SMatthew Dillon 163421864bc5SMatthew Dillon vn_unlock(vp); 16358c530b23SJohannes Hofmann error = dev_dread(dev, uio, ap->a_ioflag, NULL); 1636845bd036SMatthew Dillon vn_lock(vp, LK_SHARED | LK_RETRY); 163721864bc5SMatthew Dillon 1638898c91eeSMatthew Dillon if (node) 1639d489a79aSMatthew Dillon vfs_timestamp(&node->atime); 164021864bc5SMatthew Dillon 164121864bc5SMatthew Dillon return (error); 164221864bc5SMatthew Dillon } 164321864bc5SMatthew Dillon 164421864bc5SMatthew Dillon /* 164521864bc5SMatthew Dillon * Vnode op for write 164621864bc5SMatthew Dillon * 164721864bc5SMatthew Dillon * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag, 164821864bc5SMatthew Dillon * struct ucred *a_cred) 164921864bc5SMatthew Dillon */ 165021864bc5SMatthew Dillon static int 165121864bc5SMatthew Dillon devfs_spec_write(struct vop_write_args *ap) 165221864bc5SMatthew Dillon { 1653898c91eeSMatthew Dillon struct devfs_node *node; 165421864bc5SMatthew Dillon 
struct vnode *vp; 165521864bc5SMatthew Dillon struct uio *uio; 165621864bc5SMatthew Dillon cdev_t dev; 165721864bc5SMatthew Dillon int error; 165821864bc5SMatthew Dillon 165921864bc5SMatthew Dillon vp = ap->a_vp; 166021864bc5SMatthew Dillon dev = vp->v_rdev; 166121864bc5SMatthew Dillon uio = ap->a_uio; 1662898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 166321864bc5SMatthew Dillon 166421864bc5SMatthew Dillon KKASSERT(uio->uio_segflg != UIO_NOCOPY); 166521864bc5SMatthew Dillon 166621864bc5SMatthew Dillon if (dev == NULL) /* device was revoked */ 166721864bc5SMatthew Dillon return (EBADF); 166821864bc5SMatthew Dillon 166921864bc5SMatthew Dillon vn_unlock(vp); 16708c530b23SJohannes Hofmann error = dev_dwrite(dev, uio, ap->a_ioflag, NULL); 167121864bc5SMatthew Dillon vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 167221864bc5SMatthew Dillon 167307dfa375SAlex Hornung if (node) { 1674d489a79aSMatthew Dillon vfs_timestamp(&node->atime); 1675d489a79aSMatthew Dillon vfs_timestamp(&node->mtime); 167607dfa375SAlex Hornung } 167721864bc5SMatthew Dillon 167821864bc5SMatthew Dillon return (error); 167921864bc5SMatthew Dillon } 168021864bc5SMatthew Dillon 168121864bc5SMatthew Dillon /* 168221864bc5SMatthew Dillon * Device ioctl operation. 
168321864bc5SMatthew Dillon * 168421864bc5SMatthew Dillon * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data, 168587baaf0cSMatthew Dillon * int a_fflag, struct ucred *a_cred, struct sysmsg *msg) 168621864bc5SMatthew Dillon */ 168721864bc5SMatthew Dillon static int 168821864bc5SMatthew Dillon devfs_spec_ioctl(struct vop_ioctl_args *ap) 168921864bc5SMatthew Dillon { 169021864bc5SMatthew Dillon struct vnode *vp = ap->a_vp; 16911d0de3d3SSascha Wildner #if 0 1692898c91eeSMatthew Dillon struct devfs_node *node; 16931d0de3d3SSascha Wildner #endif 1694898c91eeSMatthew Dillon cdev_t dev; 169521864bc5SMatthew Dillon 169621864bc5SMatthew Dillon if ((dev = vp->v_rdev) == NULL) 169721864bc5SMatthew Dillon return (EBADF); /* device was revoked */ 16981d0de3d3SSascha Wildner #if 0 1699898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 170021864bc5SMatthew Dillon 1701898c91eeSMatthew Dillon if (node) { 1702d489a79aSMatthew Dillon vfs_timestamp(&node->atime); 1703d489a79aSMatthew Dillon vfs_timestamp(&node->mtime); 170421864bc5SMatthew Dillon } 170507dfa375SAlex Hornung #endif 170621864bc5SMatthew Dillon 170787baaf0cSMatthew Dillon return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag, 17088c530b23SJohannes Hofmann ap->a_cred, ap->a_sysmsg, NULL)); 170921864bc5SMatthew Dillon } 171021864bc5SMatthew Dillon 171121864bc5SMatthew Dillon /* 171221864bc5SMatthew Dillon * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn) 171321864bc5SMatthew Dillon */ 171421864bc5SMatthew Dillon /* ARGSUSED */ 171521864bc5SMatthew Dillon static int 171621864bc5SMatthew Dillon devfs_spec_kqfilter(struct vop_kqfilter_args *ap) 171721864bc5SMatthew Dillon { 171821864bc5SMatthew Dillon struct vnode *vp = ap->a_vp; 17191d0de3d3SSascha Wildner #if 0 1720898c91eeSMatthew Dillon struct devfs_node *node; 17211d0de3d3SSascha Wildner #endif 1722898c91eeSMatthew Dillon cdev_t dev; 172321864bc5SMatthew Dillon 172421864bc5SMatthew Dillon if ((dev = vp->v_rdev) == NULL) 1725b287d649SMatthew Dillon 
return (EBADF); /* device was revoked (EBADF) */ 17261d0de3d3SSascha Wildner #if 0 1727898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 172821864bc5SMatthew Dillon 1729898c91eeSMatthew Dillon if (node) 1730d489a79aSMatthew Dillon vfs_timestamp(&node->atime); 173107dfa375SAlex Hornung #endif 173221864bc5SMatthew Dillon 17338c530b23SJohannes Hofmann return (dev_dkqfilter(dev, ap->a_kn, NULL)); 173421864bc5SMatthew Dillon } 173521864bc5SMatthew Dillon 173621864bc5SMatthew Dillon /* 173721864bc5SMatthew Dillon * Convert a vnode strategy call into a device strategy call. Vnode strategy 173821864bc5SMatthew Dillon * calls are not limited to device DMA limits so we have to deal with the 173921864bc5SMatthew Dillon * case. 174021864bc5SMatthew Dillon * 174121864bc5SMatthew Dillon * spec_strategy(struct vnode *a_vp, struct bio *a_bio) 174221864bc5SMatthew Dillon */ 174321864bc5SMatthew Dillon static int 174421864bc5SMatthew Dillon devfs_spec_strategy(struct vop_strategy_args *ap) 174521864bc5SMatthew Dillon { 174621864bc5SMatthew Dillon struct bio *bio = ap->a_bio; 174721864bc5SMatthew Dillon struct buf *bp = bio->bio_buf; 174821864bc5SMatthew Dillon struct buf *nbp; 174921864bc5SMatthew Dillon struct vnode *vp; 175021864bc5SMatthew Dillon struct mount *mp; 175121864bc5SMatthew Dillon int chunksize; 175221864bc5SMatthew Dillon int maxiosize; 175321864bc5SMatthew Dillon 175421864bc5SMatthew Dillon if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL) 175521864bc5SMatthew Dillon buf_start(bp); 175621864bc5SMatthew Dillon 175721864bc5SMatthew Dillon /* 175821864bc5SMatthew Dillon * Collect statistics on synchronous and asynchronous read 175921864bc5SMatthew Dillon * and write counts for disks that have associated filesystems. 
176021864bc5SMatthew Dillon */ 176121864bc5SMatthew Dillon vp = ap->a_vp; 176221864bc5SMatthew Dillon KKASSERT(vp->v_rdev != NULL); /* XXX */ 176321864bc5SMatthew Dillon if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) { 176421864bc5SMatthew Dillon if (bp->b_cmd == BUF_CMD_READ) { 176521864bc5SMatthew Dillon if (bp->b_flags & BIO_SYNC) 176621864bc5SMatthew Dillon mp->mnt_stat.f_syncreads++; 176721864bc5SMatthew Dillon else 176821864bc5SMatthew Dillon mp->mnt_stat.f_asyncreads++; 176921864bc5SMatthew Dillon } else { 177021864bc5SMatthew Dillon if (bp->b_flags & BIO_SYNC) 177121864bc5SMatthew Dillon mp->mnt_stat.f_syncwrites++; 177221864bc5SMatthew Dillon else 177321864bc5SMatthew Dillon mp->mnt_stat.f_asyncwrites++; 177421864bc5SMatthew Dillon } 177521864bc5SMatthew Dillon } 177621864bc5SMatthew Dillon 177721864bc5SMatthew Dillon /* 177821864bc5SMatthew Dillon * Device iosize limitations only apply to read and write. Shortcut 177921864bc5SMatthew Dillon * the I/O if it fits. 178021864bc5SMatthew Dillon */ 178121864bc5SMatthew Dillon if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) { 1782898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1783898c91eeSMatthew Dillon "%s: si_iosize_max not set!\n", 1784898c91eeSMatthew Dillon dev_dname(vp->v_rdev)); 178521864bc5SMatthew Dillon maxiosize = MAXPHYS; 178621864bc5SMatthew Dillon } 178721864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 2 178821864bc5SMatthew Dillon maxiosize = 4096; 178921864bc5SMatthew Dillon #endif 179021864bc5SMatthew Dillon if (bp->b_bcount <= maxiosize || 179121864bc5SMatthew Dillon (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) { 179221864bc5SMatthew Dillon dev_dstrategy_chain(vp->v_rdev, bio); 179321864bc5SMatthew Dillon return (0); 179421864bc5SMatthew Dillon } 179521864bc5SMatthew Dillon 179621864bc5SMatthew Dillon /* 179721864bc5SMatthew Dillon * Clone the buffer and set up an I/O chain to chunk up the I/O. 
179821864bc5SMatthew Dillon */ 179921864bc5SMatthew Dillon nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO); 180021864bc5SMatthew Dillon initbufbio(nbp); 180121864bc5SMatthew Dillon buf_dep_init(nbp); 180221864bc5SMatthew Dillon BUF_LOCK(nbp, LK_EXCLUSIVE); 180321864bc5SMatthew Dillon BUF_KERNPROC(nbp); 180421864bc5SMatthew Dillon nbp->b_vp = vp; 1805b3f55d88SMatthew Dillon nbp->b_flags = B_PAGING | B_KVABIO | (bp->b_flags & B_BNOCLIP); 1806b3f55d88SMatthew Dillon nbp->b_cpumask = bp->b_cpumask; 180721864bc5SMatthew Dillon nbp->b_data = bp->b_data; 180821864bc5SMatthew Dillon nbp->b_bio1.bio_done = devfs_spec_strategy_done; 180921864bc5SMatthew Dillon nbp->b_bio1.bio_offset = bio->bio_offset; 181021864bc5SMatthew Dillon nbp->b_bio1.bio_caller_info1.ptr = bio; 181121864bc5SMatthew Dillon 181221864bc5SMatthew Dillon /* 181321864bc5SMatthew Dillon * Start the first transfer 181421864bc5SMatthew Dillon */ 181521864bc5SMatthew Dillon if (vn_isdisk(vp, NULL)) 181621864bc5SMatthew Dillon chunksize = vp->v_rdev->si_bsize_phys; 181721864bc5SMatthew Dillon else 181821864bc5SMatthew Dillon chunksize = DEV_BSIZE; 1819ed183f8cSSascha Wildner chunksize = rounddown(maxiosize, chunksize); 182021864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1821898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1822898c91eeSMatthew Dillon "spec_strategy chained I/O chunksize=%d\n", 1823898c91eeSMatthew Dillon chunksize); 182421864bc5SMatthew Dillon #endif 182521864bc5SMatthew Dillon nbp->b_cmd = bp->b_cmd; 182621864bc5SMatthew Dillon nbp->b_bcount = chunksize; 182721864bc5SMatthew Dillon nbp->b_bufsize = chunksize; /* used to detect a short I/O */ 182821864bc5SMatthew Dillon nbp->b_bio1.bio_caller_info2.index = chunksize; 182921864bc5SMatthew Dillon 183021864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1831898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1832898c91eeSMatthew Dillon "spec_strategy: chain %p offset %d/%d bcount %d\n", 183321864bc5SMatthew Dillon bp, 0, bp->b_bcount, 
nbp->b_bcount); 183421864bc5SMatthew Dillon #endif 183521864bc5SMatthew Dillon 183621864bc5SMatthew Dillon dev_dstrategy(vp->v_rdev, &nbp->b_bio1); 183721864bc5SMatthew Dillon 183821864bc5SMatthew Dillon if (DEVFS_NODE(vp)) { 1839d489a79aSMatthew Dillon vfs_timestamp(&DEVFS_NODE(vp)->atime); 1840d489a79aSMatthew Dillon vfs_timestamp(&DEVFS_NODE(vp)->mtime); 184121864bc5SMatthew Dillon } 184221864bc5SMatthew Dillon 184321864bc5SMatthew Dillon return (0); 184421864bc5SMatthew Dillon } 184521864bc5SMatthew Dillon 184621864bc5SMatthew Dillon /* 184721864bc5SMatthew Dillon * Chunked up transfer completion routine - chain transfers until done 184877912481SMatthew Dillon * 184977912481SMatthew Dillon * NOTE: MPSAFE callback. 185021864bc5SMatthew Dillon */ 185121864bc5SMatthew Dillon static 185221864bc5SMatthew Dillon void 185321864bc5SMatthew Dillon devfs_spec_strategy_done(struct bio *nbio) 185421864bc5SMatthew Dillon { 185521864bc5SMatthew Dillon struct buf *nbp = nbio->bio_buf; 185621864bc5SMatthew Dillon struct bio *bio = nbio->bio_caller_info1.ptr; /* original bio */ 185721864bc5SMatthew Dillon struct buf *bp = bio->bio_buf; /* original bp */ 185821864bc5SMatthew Dillon int chunksize = nbio->bio_caller_info2.index; /* chunking */ 185921864bc5SMatthew Dillon int boffset = nbp->b_data - bp->b_data; 186021864bc5SMatthew Dillon 186121864bc5SMatthew Dillon if (nbp->b_flags & B_ERROR) { 186221864bc5SMatthew Dillon /* 186321864bc5SMatthew Dillon * An error terminates the chain, propogate the error back 186421864bc5SMatthew Dillon * to the original bp 186521864bc5SMatthew Dillon */ 186621864bc5SMatthew Dillon bp->b_flags |= B_ERROR; 186721864bc5SMatthew Dillon bp->b_error = nbp->b_error; 186821864bc5SMatthew Dillon bp->b_resid = bp->b_bcount - boffset + 186921864bc5SMatthew Dillon (nbp->b_bcount - nbp->b_resid); 187021864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1871898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1872898c91eeSMatthew Dillon "spec_strategy: chain %p 
error %d bcount %d/%d\n", 187321864bc5SMatthew Dillon bp, bp->b_error, bp->b_bcount, 187421864bc5SMatthew Dillon bp->b_bcount - bp->b_resid); 187521864bc5SMatthew Dillon #endif 187621864bc5SMatthew Dillon } else if (nbp->b_resid) { 187721864bc5SMatthew Dillon /* 187821864bc5SMatthew Dillon * A short read or write terminates the chain 187921864bc5SMatthew Dillon */ 188021864bc5SMatthew Dillon bp->b_error = nbp->b_error; 188121864bc5SMatthew Dillon bp->b_resid = bp->b_bcount - boffset + 188221864bc5SMatthew Dillon (nbp->b_bcount - nbp->b_resid); 188321864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1884898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1885898c91eeSMatthew Dillon "spec_strategy: chain %p short read(1) " 1886898c91eeSMatthew Dillon "bcount %d/%d\n", 188721864bc5SMatthew Dillon bp, bp->b_bcount - bp->b_resid, bp->b_bcount); 188821864bc5SMatthew Dillon #endif 188921864bc5SMatthew Dillon } else if (nbp->b_bcount != nbp->b_bufsize) { 189021864bc5SMatthew Dillon /* 189121864bc5SMatthew Dillon * A short read or write can also occur by truncating b_bcount 189221864bc5SMatthew Dillon */ 189321864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1894898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1895898c91eeSMatthew Dillon "spec_strategy: chain %p short read(2) " 1896898c91eeSMatthew Dillon "bcount %d/%d\n", 189721864bc5SMatthew Dillon bp, nbp->b_bcount + boffset, bp->b_bcount); 189821864bc5SMatthew Dillon #endif 189921864bc5SMatthew Dillon bp->b_error = 0; 190021864bc5SMatthew Dillon bp->b_bcount = nbp->b_bcount + boffset; 190121864bc5SMatthew Dillon bp->b_resid = nbp->b_resid; 190221864bc5SMatthew Dillon } else if (nbp->b_bcount + boffset == bp->b_bcount) { 190321864bc5SMatthew Dillon /* 190421864bc5SMatthew Dillon * No more data terminates the chain 190521864bc5SMatthew Dillon */ 190621864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1907898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1908898c91eeSMatthew Dillon "spec_strategy: chain %p finished 
bcount %d\n", 190921864bc5SMatthew Dillon bp, bp->b_bcount); 191021864bc5SMatthew Dillon #endif 191121864bc5SMatthew Dillon bp->b_error = 0; 191221864bc5SMatthew Dillon bp->b_resid = 0; 191321864bc5SMatthew Dillon } else { 191421864bc5SMatthew Dillon /* 191521864bc5SMatthew Dillon * Continue the chain 191621864bc5SMatthew Dillon */ 191721864bc5SMatthew Dillon boffset += nbp->b_bcount; 191821864bc5SMatthew Dillon nbp->b_data = bp->b_data + boffset; 191921864bc5SMatthew Dillon nbp->b_bcount = bp->b_bcount - boffset; 192021864bc5SMatthew Dillon if (nbp->b_bcount > chunksize) 192121864bc5SMatthew Dillon nbp->b_bcount = chunksize; 192221864bc5SMatthew Dillon nbp->b_bio1.bio_done = devfs_spec_strategy_done; 192321864bc5SMatthew Dillon nbp->b_bio1.bio_offset = bio->bio_offset + boffset; 192421864bc5SMatthew Dillon 192521864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1 1926898c91eeSMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, 1927898c91eeSMatthew Dillon "spec_strategy: chain %p offset %d/%d bcount %d\n", 192821864bc5SMatthew Dillon bp, boffset, bp->b_bcount, nbp->b_bcount); 192921864bc5SMatthew Dillon #endif 193021864bc5SMatthew Dillon 193121864bc5SMatthew Dillon dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1); 1932b5d7061dSMatthew Dillon return; 193321864bc5SMatthew Dillon } 1934b5d7061dSMatthew Dillon 1935b5d7061dSMatthew Dillon /* 1936b5d7061dSMatthew Dillon * Fall through to here on termination. biodone(bp) and 1937b5d7061dSMatthew Dillon * clean up and free nbp. 
1938b5d7061dSMatthew Dillon */ 1939b5d7061dSMatthew Dillon biodone(bio); 1940b5d7061dSMatthew Dillon BUF_UNLOCK(nbp); 1941b5d7061dSMatthew Dillon uninitbufbio(nbp); 1942b5d7061dSMatthew Dillon kfree(nbp, M_DEVBUF); 194321864bc5SMatthew Dillon } 194421864bc5SMatthew Dillon 194521864bc5SMatthew Dillon /* 194621864bc5SMatthew Dillon * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length) 194721864bc5SMatthew Dillon */ 194821864bc5SMatthew Dillon static int 194921864bc5SMatthew Dillon devfs_spec_freeblks(struct vop_freeblks_args *ap) 195021864bc5SMatthew Dillon { 195121864bc5SMatthew Dillon struct buf *bp; 195221864bc5SMatthew Dillon 195321864bc5SMatthew Dillon /* 195453005b09SMatthew Dillon * Must be a synchronous operation 195521864bc5SMatthew Dillon */ 195621864bc5SMatthew Dillon KKASSERT(ap->a_vp->v_rdev != NULL); 1957bf390b25SAlex Hornung if ((ap->a_vp->v_rdev->si_flags & SI_CANFREE) == 0) 195821864bc5SMatthew Dillon return (0); 1959bf20632cSMatthew Dillon bp = getpbuf(NULL); 196021864bc5SMatthew Dillon bp->b_cmd = BUF_CMD_FREEBLKS; 196153005b09SMatthew Dillon bp->b_bio1.bio_flags |= BIO_SYNC; 196221864bc5SMatthew Dillon bp->b_bio1.bio_offset = ap->a_offset; 196353005b09SMatthew Dillon bp->b_bio1.bio_done = biodone_sync; 196421864bc5SMatthew Dillon bp->b_bcount = ap->a_length; 196521864bc5SMatthew Dillon dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1); 196653005b09SMatthew Dillon biowait(&bp->b_bio1, "TRIM"); 1967bf20632cSMatthew Dillon relpbuf(bp, NULL); 196853005b09SMatthew Dillon 196921864bc5SMatthew Dillon return (0); 197021864bc5SMatthew Dillon } 197121864bc5SMatthew Dillon 197221864bc5SMatthew Dillon /* 197321864bc5SMatthew Dillon * Implement degenerate case where the block requested is the block 197421864bc5SMatthew Dillon * returned, and assume that the entire device is contiguous in regards 197521864bc5SMatthew Dillon * to the contiguous block range (runp and runb). 
197621864bc5SMatthew Dillon * 197721864bc5SMatthew Dillon * spec_bmap(struct vnode *a_vp, off_t a_loffset, 197821864bc5SMatthew Dillon * off_t *a_doffsetp, int *a_runp, int *a_runb) 197921864bc5SMatthew Dillon */ 198021864bc5SMatthew Dillon static int 198121864bc5SMatthew Dillon devfs_spec_bmap(struct vop_bmap_args *ap) 198221864bc5SMatthew Dillon { 198321864bc5SMatthew Dillon if (ap->a_doffsetp != NULL) 198421864bc5SMatthew Dillon *ap->a_doffsetp = ap->a_loffset; 198521864bc5SMatthew Dillon if (ap->a_runp != NULL) 198621864bc5SMatthew Dillon *ap->a_runp = MAXBSIZE; 198721864bc5SMatthew Dillon if (ap->a_runb != NULL) { 198821864bc5SMatthew Dillon if (ap->a_loffset < MAXBSIZE) 198921864bc5SMatthew Dillon *ap->a_runb = (int)ap->a_loffset; 199021864bc5SMatthew Dillon else 199121864bc5SMatthew Dillon *ap->a_runb = MAXBSIZE; 199221864bc5SMatthew Dillon } 199321864bc5SMatthew Dillon return (0); 199421864bc5SMatthew Dillon } 199521864bc5SMatthew Dillon 199621864bc5SMatthew Dillon 199721864bc5SMatthew Dillon /* 199821864bc5SMatthew Dillon * Special device advisory byte-level locks. 199921864bc5SMatthew Dillon * 200021864bc5SMatthew Dillon * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op, 200121864bc5SMatthew Dillon * struct flock *a_fl, int a_flags) 200221864bc5SMatthew Dillon */ 200321864bc5SMatthew Dillon /* ARGSUSED */ 200421864bc5SMatthew Dillon static int 200521864bc5SMatthew Dillon devfs_spec_advlock(struct vop_advlock_args *ap) 200621864bc5SMatthew Dillon { 200721864bc5SMatthew Dillon return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP); 200821864bc5SMatthew Dillon } 200921864bc5SMatthew Dillon 201077912481SMatthew Dillon /* 201177912481SMatthew Dillon * NOTE: MPSAFE callback. 
 */
/*
 * BIO completion callback for devfs_spec_getpages().  Marks the buffer
 * command done and wakes the thread sleeping on the buffer below.
 */
static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 *
 * Reads the backing device synchronously through a temporary pbuf,
 * zero-extends any short read, and disposes of every page except the
 * requested one.  Returns VM_PAGER_OK or VM_PAGER_ERROR.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;	/* pbuf KVA the pages are mapped into */
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;		/* physical block size used for rounding */
	int gotreqpage;		/* set once the requested page is valid */

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = roundup2(ap->a_count, blksiz);

	bp = getpbuf_kva(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter_noinval(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_flags |= B_KVABIO;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bsetrunningbufspace(bp, size);

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/*
	 * Wait for the iodone callback to flag completion.
	 * NOTE(review): the old "must be at splbio" rationale predates the
	 * crit_enter/crit_exit conversion -- confirm the critical section
	 * is still required around this sleep loop.
	 */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;	/* device flagged error w/o a code */
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count) {
		bkvasync(bp);
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	}
	pmap_qremove_noinval(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			/* page fully covered by the read */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			/* page entirely past the read; nothing valid */
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_REFERENCED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				/* error and page not fully valid: toss it */
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "   size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "   nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		vfs_timestamp(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}

/*
 * Estimate how sequential the I/O pattern on fp is and return a hint
 * (f_seqcount shifted by IO_SEQSHIFT) for read-ahead, or 0 when the
 * access is not sequential.  Updates fp->f_seqcount as a side effect.
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;	/* clamp the counter */
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}