/*
 * (MPSAFE)
 *
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE 2721864bc5SMatthew Dillon * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 2821864bc5SMatthew Dillon * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 2921864bc5SMatthew Dillon * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 3021864bc5SMatthew Dillon * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 3121864bc5SMatthew Dillon * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 3221864bc5SMatthew Dillon * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 3321864bc5SMatthew Dillon * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3421864bc5SMatthew Dillon * SUCH DAMAGE. 3521864bc5SMatthew Dillon */ 3621864bc5SMatthew Dillon #include <sys/param.h> 3721864bc5SMatthew Dillon #include <sys/systm.h> 3821864bc5SMatthew Dillon #include <sys/time.h> 3921864bc5SMatthew Dillon #include <sys/kernel.h> 4021864bc5SMatthew Dillon #include <sys/lock.h> 4121864bc5SMatthew Dillon #include <sys/fcntl.h> 4221864bc5SMatthew Dillon #include <sys/proc.h> 4321864bc5SMatthew Dillon #include <sys/priv.h> 4421864bc5SMatthew Dillon #include <sys/signalvar.h> 4521864bc5SMatthew Dillon #include <sys/vnode.h> 4621864bc5SMatthew Dillon #include <sys/uio.h> 4721864bc5SMatthew Dillon #include <sys/mount.h> 4821864bc5SMatthew Dillon #include <sys/file.h> 4921864bc5SMatthew Dillon #include <sys/namei.h> 5021864bc5SMatthew Dillon #include <sys/dirent.h> 5121864bc5SMatthew Dillon #include <sys/malloc.h> 5221864bc5SMatthew Dillon #include <sys/stat.h> 5321864bc5SMatthew Dillon #include <sys/reg.h> 5421864bc5SMatthew Dillon #include <vm/vm_pager.h> 5521864bc5SMatthew Dillon #include <vm/vm_zone.h> 5621864bc5SMatthew Dillon #include <vm/vm_object.h> 5721864bc5SMatthew Dillon #include <sys/filio.h> 5821864bc5SMatthew Dillon #include <sys/ttycom.h> 5921864bc5SMatthew Dillon #include <sys/tty.h> 602d076755SAlex Hornung #include <sys/diskslice.h> 
613a1032a6SAlex Hornung #include <sys/sysctl.h> 622c1e28ddSAlex Hornung #include <sys/devfs.h> 6321864bc5SMatthew Dillon #include <sys/pioctl.h> 64c705e298SSamuel J. Greear #include <vfs/fifofs/fifo.h> 6521864bc5SMatthew Dillon 6621864bc5SMatthew Dillon #include <machine/limits.h> 67684a93c4SMatthew Dillon 681a54183bSMatthew Dillon #include <sys/buf2.h> 691a54183bSMatthew Dillon #include <sys/sysref2.h> 70684a93c4SMatthew Dillon #include <sys/mplock2.h> 71684a93c4SMatthew Dillon #include <vm/vm_page2.h> 7221864bc5SMatthew Dillon 73752b2d38SSascha Wildner #ifndef SPEC_CHAIN_DEBUG 74752b2d38SSascha Wildner #define SPEC_CHAIN_DEBUG 0 75752b2d38SSascha Wildner #endif 76752b2d38SSascha Wildner 7721864bc5SMatthew Dillon MALLOC_DECLARE(M_DEVFS); 789f889dc4SMatthew Dillon #define DEVFS_BADOP (void *)devfs_vop_badop 7921864bc5SMatthew Dillon 809f889dc4SMatthew Dillon static int devfs_vop_badop(struct vop_generic_args *); 819f889dc4SMatthew Dillon static int devfs_vop_access(struct vop_access_args *); 829f889dc4SMatthew Dillon static int devfs_vop_inactive(struct vop_inactive_args *); 839f889dc4SMatthew Dillon static int devfs_vop_reclaim(struct vop_reclaim_args *); 849f889dc4SMatthew Dillon static int devfs_vop_readdir(struct vop_readdir_args *); 859f889dc4SMatthew Dillon static int devfs_vop_getattr(struct vop_getattr_args *); 869f889dc4SMatthew Dillon static int devfs_vop_setattr(struct vop_setattr_args *); 879f889dc4SMatthew Dillon static int devfs_vop_readlink(struct vop_readlink_args *); 889f889dc4SMatthew Dillon static int devfs_vop_print(struct vop_print_args *); 8921864bc5SMatthew Dillon 909f889dc4SMatthew Dillon static int devfs_vop_nresolve(struct vop_nresolve_args *); 919f889dc4SMatthew Dillon static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *); 929f889dc4SMatthew Dillon static int devfs_vop_nmkdir(struct vop_nmkdir_args *); 939f889dc4SMatthew Dillon static int devfs_vop_nsymlink(struct vop_nsymlink_args *); 949f889dc4SMatthew Dillon static int 
devfs_vop_nrmdir(struct vop_nrmdir_args *); 959f889dc4SMatthew Dillon static int devfs_vop_nremove(struct vop_nremove_args *); 9621864bc5SMatthew Dillon 9721864bc5SMatthew Dillon static int devfs_spec_open(struct vop_open_args *); 9821864bc5SMatthew Dillon static int devfs_spec_close(struct vop_close_args *); 9921864bc5SMatthew Dillon static int devfs_spec_fsync(struct vop_fsync_args *); 10021864bc5SMatthew Dillon 10121864bc5SMatthew Dillon static int devfs_spec_read(struct vop_read_args *); 10221864bc5SMatthew Dillon static int devfs_spec_write(struct vop_write_args *); 10321864bc5SMatthew Dillon static int devfs_spec_ioctl(struct vop_ioctl_args *); 10421864bc5SMatthew Dillon static int devfs_spec_kqfilter(struct vop_kqfilter_args *); 10521864bc5SMatthew Dillon static int devfs_spec_strategy(struct vop_strategy_args *); 10621864bc5SMatthew Dillon static void devfs_spec_strategy_done(struct bio *); 10721864bc5SMatthew Dillon static int devfs_spec_freeblks(struct vop_freeblks_args *); 10821864bc5SMatthew Dillon static int devfs_spec_bmap(struct vop_bmap_args *); 10921864bc5SMatthew Dillon static int devfs_spec_advlock(struct vop_advlock_args *); 11021864bc5SMatthew Dillon static void devfs_spec_getpages_iodone(struct bio *); 11121864bc5SMatthew Dillon static int devfs_spec_getpages(struct vop_getpages_args *); 11221864bc5SMatthew Dillon 1139f889dc4SMatthew Dillon static int devfs_fo_close(struct file *); 1149f889dc4SMatthew Dillon static int devfs_fo_read(struct file *, struct uio *, struct ucred *, int); 1159f889dc4SMatthew Dillon static int devfs_fo_write(struct file *, struct uio *, struct ucred *, int); 1169f889dc4SMatthew Dillon static int devfs_fo_stat(struct file *, struct stat *, struct ucred *); 1179f889dc4SMatthew Dillon static int devfs_fo_kqfilter(struct file *, struct knote *); 1189f889dc4SMatthew Dillon static int devfs_fo_ioctl(struct file *, u_long, caddr_t, 11987baaf0cSMatthew Dillon struct ucred *, struct sysmsg *); 12021864bc5SMatthew Dillon 
static __inline int sequential_heuristic(struct uio *, struct file *); 12187baaf0cSMatthew Dillon 12221864bc5SMatthew Dillon extern struct lock devfs_lock; 12321864bc5SMatthew Dillon 12421864bc5SMatthew Dillon /* 1259f889dc4SMatthew Dillon * devfs vnode operations for regular files. All vnode ops are MPSAFE. 12621864bc5SMatthew Dillon */ 12721864bc5SMatthew Dillon struct vop_ops devfs_vnode_norm_vops = { 12821864bc5SMatthew Dillon .vop_default = vop_defaultop, 1299f889dc4SMatthew Dillon .vop_access = devfs_vop_access, 13021864bc5SMatthew Dillon .vop_advlock = DEVFS_BADOP, 13121864bc5SMatthew Dillon .vop_bmap = DEVFS_BADOP, 13221864bc5SMatthew Dillon .vop_close = vop_stdclose, 1339f889dc4SMatthew Dillon .vop_getattr = devfs_vop_getattr, 1349f889dc4SMatthew Dillon .vop_inactive = devfs_vop_inactive, 13521864bc5SMatthew Dillon .vop_ncreate = DEVFS_BADOP, 1369f889dc4SMatthew Dillon .vop_nresolve = devfs_vop_nresolve, 1379f889dc4SMatthew Dillon .vop_nlookupdotdot = devfs_vop_nlookupdotdot, 13821864bc5SMatthew Dillon .vop_nlink = DEVFS_BADOP, 1399f889dc4SMatthew Dillon .vop_nmkdir = devfs_vop_nmkdir, 14021864bc5SMatthew Dillon .vop_nmknod = DEVFS_BADOP, 1419f889dc4SMatthew Dillon .vop_nremove = devfs_vop_nremove, 14221864bc5SMatthew Dillon .vop_nrename = DEVFS_BADOP, 1439f889dc4SMatthew Dillon .vop_nrmdir = devfs_vop_nrmdir, 1449f889dc4SMatthew Dillon .vop_nsymlink = devfs_vop_nsymlink, 14521864bc5SMatthew Dillon .vop_open = vop_stdopen, 14621864bc5SMatthew Dillon .vop_pathconf = vop_stdpathconf, 1479f889dc4SMatthew Dillon .vop_print = devfs_vop_print, 14821864bc5SMatthew Dillon .vop_read = DEVFS_BADOP, 1499f889dc4SMatthew Dillon .vop_readdir = devfs_vop_readdir, 1509f889dc4SMatthew Dillon .vop_readlink = devfs_vop_readlink, 1519f889dc4SMatthew Dillon .vop_reclaim = devfs_vop_reclaim, 1529f889dc4SMatthew Dillon .vop_setattr = devfs_vop_setattr, 15321864bc5SMatthew Dillon .vop_write = DEVFS_BADOP, 15421864bc5SMatthew Dillon .vop_ioctl = DEVFS_BADOP 15521864bc5SMatthew 
Dillon }; 15621864bc5SMatthew Dillon 15721864bc5SMatthew Dillon /* 1589f889dc4SMatthew Dillon * devfs vnode operations for character devices. All vnode ops are MPSAFE. 15921864bc5SMatthew Dillon */ 16021864bc5SMatthew Dillon struct vop_ops devfs_vnode_dev_vops = { 16121864bc5SMatthew Dillon .vop_default = vop_defaultop, 1629f889dc4SMatthew Dillon .vop_access = devfs_vop_access, 16321864bc5SMatthew Dillon .vop_advlock = devfs_spec_advlock, 16421864bc5SMatthew Dillon .vop_bmap = devfs_spec_bmap, 16521864bc5SMatthew Dillon .vop_close = devfs_spec_close, 16621864bc5SMatthew Dillon .vop_freeblks = devfs_spec_freeblks, 16721864bc5SMatthew Dillon .vop_fsync = devfs_spec_fsync, 1689f889dc4SMatthew Dillon .vop_getattr = devfs_vop_getattr, 16921864bc5SMatthew Dillon .vop_getpages = devfs_spec_getpages, 1709f889dc4SMatthew Dillon .vop_inactive = devfs_vop_inactive, 17121864bc5SMatthew Dillon .vop_open = devfs_spec_open, 17221864bc5SMatthew Dillon .vop_pathconf = vop_stdpathconf, 1739f889dc4SMatthew Dillon .vop_print = devfs_vop_print, 17421864bc5SMatthew Dillon .vop_kqfilter = devfs_spec_kqfilter, 17521864bc5SMatthew Dillon .vop_read = devfs_spec_read, 17621864bc5SMatthew Dillon .vop_readdir = DEVFS_BADOP, 17721864bc5SMatthew Dillon .vop_readlink = DEVFS_BADOP, 1789f889dc4SMatthew Dillon .vop_reclaim = devfs_vop_reclaim, 1799f889dc4SMatthew Dillon .vop_setattr = devfs_vop_setattr, 18021864bc5SMatthew Dillon .vop_strategy = devfs_spec_strategy, 18121864bc5SMatthew Dillon .vop_write = devfs_spec_write, 18221864bc5SMatthew Dillon .vop_ioctl = devfs_spec_ioctl 18321864bc5SMatthew Dillon }; 18421864bc5SMatthew Dillon 1859f889dc4SMatthew Dillon /* 1869f889dc4SMatthew Dillon * devfs file pointer operations. All fileops are MPSAFE. 
1879f889dc4SMatthew Dillon */ 18821864bc5SMatthew Dillon struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops; 18921864bc5SMatthew Dillon 19021864bc5SMatthew Dillon struct fileops devfs_dev_fileops = { 1919f889dc4SMatthew Dillon .fo_read = devfs_fo_read, 1929f889dc4SMatthew Dillon .fo_write = devfs_fo_write, 1939f889dc4SMatthew Dillon .fo_ioctl = devfs_fo_ioctl, 1949f889dc4SMatthew Dillon .fo_kqfilter = devfs_fo_kqfilter, 1959f889dc4SMatthew Dillon .fo_stat = devfs_fo_stat, 1969f889dc4SMatthew Dillon .fo_close = devfs_fo_close, 19721864bc5SMatthew Dillon .fo_shutdown = nofo_shutdown 19821864bc5SMatthew Dillon }; 19921864bc5SMatthew Dillon 2004062d050SMatthew Dillon /* 2019f889dc4SMatthew Dillon * These two functions are possibly temporary hacks for devices (aka 2029f889dc4SMatthew Dillon * the pty code) which want to control the node attributes themselves. 2034062d050SMatthew Dillon * 2044062d050SMatthew Dillon * XXX we may ultimately desire to simply remove the uid/gid/mode 2054062d050SMatthew Dillon * from the node entirely. 2069f889dc4SMatthew Dillon * 2079f889dc4SMatthew Dillon * MPSAFE - sorta. Theoretically the overwrite can compete since they 2089f889dc4SMatthew Dillon * are loading from the same fields. 
2094062d050SMatthew Dillon */ 2104062d050SMatthew Dillon static __inline void 2114062d050SMatthew Dillon node_sync_dev_get(struct devfs_node *node) 2124062d050SMatthew Dillon { 2134062d050SMatthew Dillon cdev_t dev; 2144062d050SMatthew Dillon 2154062d050SMatthew Dillon if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) { 2164062d050SMatthew Dillon node->uid = dev->si_uid; 2174062d050SMatthew Dillon node->gid = dev->si_gid; 2184062d050SMatthew Dillon node->mode = dev->si_perms; 2194062d050SMatthew Dillon } 2204062d050SMatthew Dillon } 2214062d050SMatthew Dillon 2224062d050SMatthew Dillon static __inline void 2234062d050SMatthew Dillon node_sync_dev_set(struct devfs_node *node) 2244062d050SMatthew Dillon { 2254062d050SMatthew Dillon cdev_t dev; 2264062d050SMatthew Dillon 2274062d050SMatthew Dillon if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) { 2284062d050SMatthew Dillon dev->si_uid = node->uid; 2294062d050SMatthew Dillon dev->si_gid = node->gid; 2304062d050SMatthew Dillon dev->si_perms = node->mode; 2314062d050SMatthew Dillon } 2324062d050SMatthew Dillon } 23321864bc5SMatthew Dillon 23421864bc5SMatthew Dillon /* 23521864bc5SMatthew Dillon * generic entry point for unsupported operations 23621864bc5SMatthew Dillon */ 23721864bc5SMatthew Dillon static int 2389f889dc4SMatthew Dillon devfs_vop_badop(struct vop_generic_args *ap) 23921864bc5SMatthew Dillon { 24021864bc5SMatthew Dillon return (EIO); 24121864bc5SMatthew Dillon } 24221864bc5SMatthew Dillon 24321864bc5SMatthew Dillon 24421864bc5SMatthew Dillon static int 2459f889dc4SMatthew Dillon devfs_vop_access(struct vop_access_args *ap) 24621864bc5SMatthew Dillon { 24721864bc5SMatthew Dillon struct devfs_node *node = DEVFS_NODE(ap->a_vp); 248898c91eeSMatthew Dillon int error; 24921864bc5SMatthew Dillon 250894bbb25SAlex Hornung if (!devfs_node_is_accessible(node)) 251894bbb25SAlex Hornung return ENOENT; 2524062d050SMatthew Dillon node_sync_dev_get(node); 25321864bc5SMatthew Dillon error = 
vop_helper_access(ap, node->uid, node->gid, 25421864bc5SMatthew Dillon node->mode, node->flags); 25521864bc5SMatthew Dillon 25621864bc5SMatthew Dillon return error; 25721864bc5SMatthew Dillon } 25821864bc5SMatthew Dillon 25921864bc5SMatthew Dillon 26021864bc5SMatthew Dillon static int 2619f889dc4SMatthew Dillon devfs_vop_inactive(struct vop_inactive_args *ap) 26221864bc5SMatthew Dillon { 263ca8d7677SMatthew Dillon struct devfs_node *node = DEVFS_NODE(ap->a_vp); 26421864bc5SMatthew Dillon 265ca8d7677SMatthew Dillon if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0) 26621864bc5SMatthew Dillon vrecycle(ap->a_vp); 26721864bc5SMatthew Dillon return 0; 26821864bc5SMatthew Dillon } 26921864bc5SMatthew Dillon 27021864bc5SMatthew Dillon 27121864bc5SMatthew Dillon static int 2729f889dc4SMatthew Dillon devfs_vop_reclaim(struct vop_reclaim_args *ap) 27321864bc5SMatthew Dillon { 274be6f2e86SMatthew Dillon struct devfs_node *node; 275be6f2e86SMatthew Dillon struct vnode *vp; 276be6f2e86SMatthew Dillon int locked; 277be6f2e86SMatthew Dillon 278be6f2e86SMatthew Dillon /* 279be6f2e86SMatthew Dillon * Check if it is locked already. if not, we acquire the devfs lock 280be6f2e86SMatthew Dillon */ 281ab08ac79SSascha Wildner if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) { 28221864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 28321864bc5SMatthew Dillon locked = 1; 284be6f2e86SMatthew Dillon } else { 285be6f2e86SMatthew Dillon locked = 0; 28621864bc5SMatthew Dillon } 28721864bc5SMatthew Dillon 288be6f2e86SMatthew Dillon /* 289be6f2e86SMatthew Dillon * Get rid of the devfs_node if it is no longer linked into the 290be6f2e86SMatthew Dillon * topology. 
291be6f2e86SMatthew Dillon */ 292be6f2e86SMatthew Dillon vp = ap->a_vp; 293be6f2e86SMatthew Dillon if ((node = DEVFS_NODE(vp)) != NULL) { 294be6f2e86SMatthew Dillon node->v_node = NULL; 2954062d050SMatthew Dillon if ((node->flags & DEVFS_NODE_LINKED) == 0) 2964062d050SMatthew Dillon devfs_freep(node); 29721864bc5SMatthew Dillon } 29821864bc5SMatthew Dillon 29921864bc5SMatthew Dillon if (locked) 30021864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 30121864bc5SMatthew Dillon 302be6f2e86SMatthew Dillon /* 3039b823501SAlex Hornung * v_rdev needs to be properly released using v_release_rdev 3049b823501SAlex Hornung * Make sure v_data is NULL as well. 305be6f2e86SMatthew Dillon */ 306be6f2e86SMatthew Dillon vp->v_data = NULL; 3079b823501SAlex Hornung v_release_rdev(vp); 30821864bc5SMatthew Dillon return 0; 30921864bc5SMatthew Dillon } 31021864bc5SMatthew Dillon 31121864bc5SMatthew Dillon 31221864bc5SMatthew Dillon static int 3139f889dc4SMatthew Dillon devfs_vop_readdir(struct vop_readdir_args *ap) 31421864bc5SMatthew Dillon { 315898c91eeSMatthew Dillon struct devfs_node *dnode = DEVFS_NODE(ap->a_vp); 31621864bc5SMatthew Dillon struct devfs_node *node; 31721864bc5SMatthew Dillon int cookie_index; 31821864bc5SMatthew Dillon int ncookies; 319898c91eeSMatthew Dillon int error2; 320898c91eeSMatthew Dillon int error; 321898c91eeSMatthew Dillon int r; 32221864bc5SMatthew Dillon off_t *cookies; 32321864bc5SMatthew Dillon off_t saveoff; 32421864bc5SMatthew Dillon 32521864bc5SMatthew Dillon devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n"); 32621864bc5SMatthew Dillon 32721864bc5SMatthew Dillon if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX) 32821864bc5SMatthew Dillon return (EINVAL); 329*b458d1abSMatthew Dillon error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM); 330*b458d1abSMatthew Dillon if (error) 33121864bc5SMatthew Dillon return (error); 33221864bc5SMatthew Dillon 333c512ab96SMatthew Dillon if 
(!devfs_node_is_accessible(dnode)) { 334c512ab96SMatthew Dillon vn_unlock(ap->a_vp); 335ca8d7677SMatthew Dillon return ENOENT; 336c512ab96SMatthew Dillon } 337ca8d7677SMatthew Dillon 338ca8d7677SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 339ca8d7677SMatthew Dillon 34021864bc5SMatthew Dillon saveoff = ap->a_uio->uio_offset; 34121864bc5SMatthew Dillon 34221864bc5SMatthew Dillon if (ap->a_ncookies) { 34321864bc5SMatthew Dillon ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */ 34421864bc5SMatthew Dillon if (ncookies > 256) 34521864bc5SMatthew Dillon ncookies = 256; 34621864bc5SMatthew Dillon cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK); 34721864bc5SMatthew Dillon cookie_index = 0; 34821864bc5SMatthew Dillon } else { 34921864bc5SMatthew Dillon ncookies = -1; 35021864bc5SMatthew Dillon cookies = NULL; 35121864bc5SMatthew Dillon cookie_index = 0; 35221864bc5SMatthew Dillon } 35321864bc5SMatthew Dillon 354898c91eeSMatthew Dillon nanotime(&dnode->atime); 35521864bc5SMatthew Dillon 35621864bc5SMatthew Dillon if (saveoff == 0) { 357898c91eeSMatthew Dillon r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino, 358898c91eeSMatthew Dillon DT_DIR, 1, "."); 35921864bc5SMatthew Dillon if (r) 36021864bc5SMatthew Dillon goto done; 36121864bc5SMatthew Dillon if (cookies) 36221864bc5SMatthew Dillon cookies[cookie_index] = saveoff; 36321864bc5SMatthew Dillon saveoff++; 36421864bc5SMatthew Dillon cookie_index++; 36521864bc5SMatthew Dillon if (cookie_index == ncookies) 36621864bc5SMatthew Dillon goto done; 36721864bc5SMatthew Dillon } 36821864bc5SMatthew Dillon 36921864bc5SMatthew Dillon if (saveoff == 1) { 370898c91eeSMatthew Dillon if (dnode->parent) { 37121864bc5SMatthew Dillon r = vop_write_dirent(&error, ap->a_uio, 372898c91eeSMatthew Dillon dnode->parent->d_dir.d_ino, 37321864bc5SMatthew Dillon DT_DIR, 2, ".."); 37421864bc5SMatthew Dillon } else { 37521864bc5SMatthew Dillon r = vop_write_dirent(&error, ap->a_uio, 376898c91eeSMatthew Dillon 
dnode->d_dir.d_ino, 377898c91eeSMatthew Dillon DT_DIR, 2, ".."); 37821864bc5SMatthew Dillon } 37921864bc5SMatthew Dillon if (r) 38021864bc5SMatthew Dillon goto done; 38121864bc5SMatthew Dillon if (cookies) 38221864bc5SMatthew Dillon cookies[cookie_index] = saveoff; 38321864bc5SMatthew Dillon saveoff++; 38421864bc5SMatthew Dillon cookie_index++; 38521864bc5SMatthew Dillon if (cookie_index == ncookies) 38621864bc5SMatthew Dillon goto done; 38721864bc5SMatthew Dillon } 38821864bc5SMatthew Dillon 389898c91eeSMatthew Dillon TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 390898c91eeSMatthew Dillon if ((node->flags & DEVFS_HIDDEN) || 391898c91eeSMatthew Dillon (node->flags & DEVFS_INVISIBLE)) { 39221864bc5SMatthew Dillon continue; 393898c91eeSMatthew Dillon } 39421864bc5SMatthew Dillon 395f7e8960cSAlex Hornung /* 3969f889dc4SMatthew Dillon * If the node type is a valid devfs alias, then we make 3979f889dc4SMatthew Dillon * sure that the target isn't hidden. If it is, we don't 3989f889dc4SMatthew Dillon * show the link in the directory listing. 
399f7e8960cSAlex Hornung */ 4008e78a293SSascha Wildner if ((node->node_type == Nlink) && (node->link_target != NULL) && 401f7e8960cSAlex Hornung (node->link_target->flags & DEVFS_HIDDEN)) 402f7e8960cSAlex Hornung continue; 403f7e8960cSAlex Hornung 40421864bc5SMatthew Dillon if (node->cookie < saveoff) 40521864bc5SMatthew Dillon continue; 406f7e8960cSAlex Hornung 40721864bc5SMatthew Dillon saveoff = node->cookie; 40821864bc5SMatthew Dillon 409898c91eeSMatthew Dillon error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino, 410898c91eeSMatthew Dillon node->d_dir.d_type, 411898c91eeSMatthew Dillon node->d_dir.d_namlen, 412898c91eeSMatthew Dillon node->d_dir.d_name); 41321864bc5SMatthew Dillon 41421864bc5SMatthew Dillon if (error2) 41521864bc5SMatthew Dillon break; 41621864bc5SMatthew Dillon 41721864bc5SMatthew Dillon saveoff++; 41821864bc5SMatthew Dillon 41921864bc5SMatthew Dillon if (cookies) 42021864bc5SMatthew Dillon cookies[cookie_index] = node->cookie; 42121864bc5SMatthew Dillon ++cookie_index; 42221864bc5SMatthew Dillon if (cookie_index == ncookies) 42321864bc5SMatthew Dillon break; 42421864bc5SMatthew Dillon } 42521864bc5SMatthew Dillon 42621864bc5SMatthew Dillon done: 427ca8d7677SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 42821864bc5SMatthew Dillon vn_unlock(ap->a_vp); 42921864bc5SMatthew Dillon 43021864bc5SMatthew Dillon ap->a_uio->uio_offset = saveoff; 43121864bc5SMatthew Dillon if (error && cookie_index == 0) { 43221864bc5SMatthew Dillon if (cookies) { 43321864bc5SMatthew Dillon kfree(cookies, M_TEMP); 43421864bc5SMatthew Dillon *ap->a_ncookies = 0; 43521864bc5SMatthew Dillon *ap->a_cookies = NULL; 43621864bc5SMatthew Dillon } 43721864bc5SMatthew Dillon } else { 43821864bc5SMatthew Dillon if (cookies) { 43921864bc5SMatthew Dillon *ap->a_ncookies = cookie_index; 44021864bc5SMatthew Dillon *ap->a_cookies = cookies; 44121864bc5SMatthew Dillon } 44221864bc5SMatthew Dillon } 44321864bc5SMatthew Dillon return (error); 44421864bc5SMatthew Dillon } 
44521864bc5SMatthew Dillon 44621864bc5SMatthew Dillon 44721864bc5SMatthew Dillon static int 4489f889dc4SMatthew Dillon devfs_vop_nresolve(struct vop_nresolve_args *ap) 44921864bc5SMatthew Dillon { 450898c91eeSMatthew Dillon struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 45121864bc5SMatthew Dillon struct devfs_node *node, *found = NULL; 45221864bc5SMatthew Dillon struct namecache *ncp; 45321864bc5SMatthew Dillon struct vnode *vp = NULL; 45421864bc5SMatthew Dillon int error = 0; 45521864bc5SMatthew Dillon int len; 456260e4e8bSAlex Hornung int depth; 45721864bc5SMatthew Dillon 45821864bc5SMatthew Dillon ncp = ap->a_nch->ncp; 45921864bc5SMatthew Dillon len = ncp->nc_nlen; 46021864bc5SMatthew Dillon 461898c91eeSMatthew Dillon if (!devfs_node_is_accessible(dnode)) 462ca8d7677SMatthew Dillon return ENOENT; 463ca8d7677SMatthew Dillon 46421864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 46521864bc5SMatthew Dillon 4668e78a293SSascha Wildner if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) { 467e23485a5SMatthew Dillon error = ENOENT; 46821864bc5SMatthew Dillon cache_setvp(ap->a_nch, NULL); 46921864bc5SMatthew Dillon goto out; 47021864bc5SMatthew Dillon } 47121864bc5SMatthew Dillon 472898c91eeSMatthew Dillon TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 47321864bc5SMatthew Dillon if (len == node->d_dir.d_namlen) { 47421864bc5SMatthew Dillon if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) { 47521864bc5SMatthew Dillon found = node; 47621864bc5SMatthew Dillon break; 47721864bc5SMatthew Dillon } 47821864bc5SMatthew Dillon } 47921864bc5SMatthew Dillon } 48021864bc5SMatthew Dillon 48121864bc5SMatthew Dillon if (found) { 482260e4e8bSAlex Hornung depth = 0; 4838e78a293SSascha Wildner while ((found->node_type == Nlink) && (found->link_target)) { 484260e4e8bSAlex Hornung if (depth >= 8) { 485260e4e8bSAlex Hornung devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8"); 486260e4e8bSAlex Hornung break; 487260e4e8bSAlex Hornung } 
488260e4e8bSAlex Hornung 48921864bc5SMatthew Dillon found = found->link_target; 490260e4e8bSAlex Hornung ++depth; 491260e4e8bSAlex Hornung } 49221864bc5SMatthew Dillon 49321864bc5SMatthew Dillon if (!(found->flags & DEVFS_HIDDEN)) 49421864bc5SMatthew Dillon devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found); 49521864bc5SMatthew Dillon } 49621864bc5SMatthew Dillon 49721864bc5SMatthew Dillon if (vp == NULL) { 49821864bc5SMatthew Dillon error = ENOENT; 49921864bc5SMatthew Dillon cache_setvp(ap->a_nch, NULL); 50021864bc5SMatthew Dillon goto out; 50121864bc5SMatthew Dillon 50221864bc5SMatthew Dillon } 50321864bc5SMatthew Dillon KKASSERT(vp); 50421864bc5SMatthew Dillon vn_unlock(vp); 50521864bc5SMatthew Dillon cache_setvp(ap->a_nch, vp); 50621864bc5SMatthew Dillon vrele(vp); 50721864bc5SMatthew Dillon out: 50821864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 509898c91eeSMatthew Dillon 51021864bc5SMatthew Dillon return error; 51121864bc5SMatthew Dillon } 51221864bc5SMatthew Dillon 51321864bc5SMatthew Dillon 51421864bc5SMatthew Dillon static int 5159f889dc4SMatthew Dillon devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 51621864bc5SMatthew Dillon { 517898c91eeSMatthew Dillon struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 51821864bc5SMatthew Dillon 519898c91eeSMatthew Dillon *ap->a_vpp = NULL; 520898c91eeSMatthew Dillon if (!devfs_node_is_accessible(dnode)) 521894bbb25SAlex Hornung return ENOENT; 522894bbb25SAlex Hornung 52321864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 524898c91eeSMatthew Dillon if (dnode->parent != NULL) { 525898c91eeSMatthew Dillon devfs_allocv(ap->a_vpp, dnode->parent); 52621864bc5SMatthew Dillon vn_unlock(*ap->a_vpp); 52721864bc5SMatthew Dillon } 52821864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 52921864bc5SMatthew Dillon 53021864bc5SMatthew Dillon return ((*ap->a_vpp == NULL) ? 
ENOENT : 0); 53121864bc5SMatthew Dillon } 53221864bc5SMatthew Dillon 53321864bc5SMatthew Dillon 53421864bc5SMatthew Dillon static int 5359f889dc4SMatthew Dillon devfs_vop_getattr(struct vop_getattr_args *ap) 53621864bc5SMatthew Dillon { 53721864bc5SMatthew Dillon struct devfs_node *node = DEVFS_NODE(ap->a_vp); 538898c91eeSMatthew Dillon struct vattr *vap = ap->a_vap; 5392d076755SAlex Hornung struct partinfo pinfo; 54021864bc5SMatthew Dillon int error = 0; 54121864bc5SMatthew Dillon 542952f0188SAlex Hornung #if 0 543894bbb25SAlex Hornung if (!devfs_node_is_accessible(node)) 544ca8d7677SMatthew Dillon return ENOENT; 545952f0188SAlex Hornung #endif 5464062d050SMatthew Dillon node_sync_dev_get(node); 547ca8d7677SMatthew Dillon 548ca8d7677SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 54921864bc5SMatthew Dillon 55021864bc5SMatthew Dillon /* start by zeroing out the attributes */ 55121864bc5SMatthew Dillon VATTR_NULL(vap); 55221864bc5SMatthew Dillon 55321864bc5SMatthew Dillon /* next do all the common fields */ 55421864bc5SMatthew Dillon vap->va_type = ap->a_vp->v_type; 55521864bc5SMatthew Dillon vap->va_mode = node->mode; 55621864bc5SMatthew Dillon vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ; 557a3c7ebb9SMatthew Dillon vap->va_flags = 0; 55821864bc5SMatthew Dillon vap->va_blocksize = DEV_BSIZE; 559a3c7ebb9SMatthew Dillon vap->va_bytes = vap->va_size = 0; 56021864bc5SMatthew Dillon 56121864bc5SMatthew Dillon vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; 56221864bc5SMatthew Dillon 56321864bc5SMatthew Dillon vap->va_atime = node->atime; 56421864bc5SMatthew Dillon vap->va_mtime = node->mtime; 56521864bc5SMatthew Dillon vap->va_ctime = node->ctime; 56621864bc5SMatthew Dillon 56721864bc5SMatthew Dillon vap->va_nlink = 1; /* number of references to file */ 56821864bc5SMatthew Dillon 56921864bc5SMatthew Dillon vap->va_uid = node->uid; 57021864bc5SMatthew Dillon vap->va_gid = node->gid; 57121864bc5SMatthew Dillon 57221864bc5SMatthew Dillon vap->va_rmajor 
= 0; 57321864bc5SMatthew Dillon vap->va_rminor = 0; 57421864bc5SMatthew Dillon 5758e78a293SSascha Wildner if ((node->node_type == Ndev) && node->d_dev) { 576898c91eeSMatthew Dillon reference_dev(node->d_dev); 577898c91eeSMatthew Dillon vap->va_rminor = node->d_dev->si_uminor; 578898c91eeSMatthew Dillon release_dev(node->d_dev); 57921864bc5SMatthew Dillon } 58021864bc5SMatthew Dillon 58121864bc5SMatthew Dillon /* For a softlink the va_size is the length of the softlink */ 582898c91eeSMatthew Dillon if (node->symlink_name != 0) { 5832d076755SAlex Hornung vap->va_bytes = vap->va_size = node->symlink_namelen; 58421864bc5SMatthew Dillon } 5852d076755SAlex Hornung 5862d076755SAlex Hornung /* 5872d076755SAlex Hornung * For a disk-type device, va_size is the size of the underlying 5882d076755SAlex Hornung * device, so that lseek() works properly. 5892d076755SAlex Hornung */ 5902d076755SAlex Hornung if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) { 5912d076755SAlex Hornung bzero(&pinfo, sizeof(pinfo)); 5922d076755SAlex Hornung error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo, 5932d076755SAlex Hornung 0, proc0.p_ucred, NULL); 5942d076755SAlex Hornung if ((error == 0) && (pinfo.media_blksize != 0)) { 5952d076755SAlex Hornung vap->va_size = pinfo.media_size; 5962d076755SAlex Hornung } else { 5972d076755SAlex Hornung vap->va_size = 0; 5982d076755SAlex Hornung error = 0; 5992d076755SAlex Hornung } 6002d076755SAlex Hornung } 6012d076755SAlex Hornung 602ca8d7677SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 603898c91eeSMatthew Dillon 604894bbb25SAlex Hornung return (error); 60521864bc5SMatthew Dillon } 60621864bc5SMatthew Dillon 60721864bc5SMatthew Dillon 60821864bc5SMatthew Dillon static int 6099f889dc4SMatthew Dillon devfs_vop_setattr(struct vop_setattr_args *ap) 61021864bc5SMatthew Dillon { 611898c91eeSMatthew Dillon struct devfs_node *node = DEVFS_NODE(ap->a_vp); 61221864bc5SMatthew Dillon struct vattr *vap; 613dffaed1bSAlex Hornung uid_t cur_uid; 
614dffaed1bSAlex Hornung gid_t cur_gid; 615dffaed1bSAlex Hornung mode_t cur_mode; 61621864bc5SMatthew Dillon int error = 0; 61721864bc5SMatthew Dillon 618894bbb25SAlex Hornung if (!devfs_node_is_accessible(node)) 619ca8d7677SMatthew Dillon return ENOENT; 6204062d050SMatthew Dillon node_sync_dev_get(node); 62121864bc5SMatthew Dillon 62221864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_EXCLUSIVE); 62321864bc5SMatthew Dillon 62421864bc5SMatthew Dillon vap = ap->a_vap; 62521864bc5SMatthew Dillon 626dffaed1bSAlex Hornung if ((vap->va_uid != (uid_t)VNOVAL) || (vap->va_gid != (gid_t)VNOVAL)) { 627dffaed1bSAlex Hornung cur_uid = node->uid; 628dffaed1bSAlex Hornung cur_gid = node->gid; 629dffaed1bSAlex Hornung cur_mode = node->mode; 630dffaed1bSAlex Hornung error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 631dffaed1bSAlex Hornung ap->a_cred, &cur_uid, &cur_gid, &cur_mode); 632898c91eeSMatthew Dillon if (error) 63321864bc5SMatthew Dillon goto out; 63421864bc5SMatthew Dillon 635dffaed1bSAlex Hornung if (node->uid != cur_uid || node->gid != cur_gid) { 636dffaed1bSAlex Hornung node->uid = cur_uid; 637dffaed1bSAlex Hornung node->gid = cur_gid; 638dffaed1bSAlex Hornung node->mode = cur_mode; 63921864bc5SMatthew Dillon } 64021864bc5SMatthew Dillon } 64121864bc5SMatthew Dillon 64221864bc5SMatthew Dillon if (vap->va_mode != (mode_t)VNOVAL) { 643dffaed1bSAlex Hornung cur_mode = node->mode; 644dffaed1bSAlex Hornung error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 645dffaed1bSAlex Hornung node->uid, node->gid, &cur_mode); 646dffaed1bSAlex Hornung if (error == 0 && node->mode != cur_mode) { 647dffaed1bSAlex Hornung node->mode = cur_mode; 64821864bc5SMatthew Dillon } 64921864bc5SMatthew Dillon } 65021864bc5SMatthew Dillon 65121864bc5SMatthew Dillon out: 6524062d050SMatthew Dillon node_sync_dev_set(node); 65307dfa375SAlex Hornung nanotime(&node->ctime); 65421864bc5SMatthew Dillon lockmgr(&devfs_lock, LK_RELEASE); 655898c91eeSMatthew Dillon 65621864bc5SMatthew Dillon 
	return error;
}


/*
 * Return the target of a devfs symlink by copying the stored
 * symlink_name into the caller's uio.  The copy is done under
 * devfs_lock since symlink_name lives in the node.
 *
 * Returns ENOENT if the node is not accessible, otherwise the
 * uiomove() result.
 */
static int
devfs_vop_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


/*
 * VOP_PRINT is a no-op for devfs.
 */
static int
devfs_vop_print(struct vop_print_args *ap)
{
	return (0);
}

/*
 * Create a user-created directory node under the parent directory.
 * Only permitted when the parent is the devfs root or a directory
 * node.  The new node is marked DEVFS_USER_CREATED so it may later
 * be removed via nrmdir.
 *
 * NOTE(review): the final return dereferences *ap->a_vpp even on the
 * early 'goto out' path -- this relies on *ap->a_vpp being NULL on
 * entry; verify against the VOP_NMKDIR calling convention.
 */
static int
devfs_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

/*
 * Create a user-created symlink node under the parent directory.
 * The link target string is duplicated into a kmalloc'd,
 * NUL-terminated buffer owned by the node (freed when the node is
 * destroyed elsewhere).  Only permitted when the parent is the devfs
 * root or a directory node.
 */
static int
devfs_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		/* private NUL-terminated copy of the target path */
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

/*
 * Remove a user-created directory.  The parent's children are scanned
 * by name; only nodes flagged DEVFS_USER_CREATED may be removed.
 * Returns EPERM for non-user-created nodes, ENOTDIR if the match is
 * not a directory, ENOTEMPTY if it still has children (the > 2 test
 * presumably accounts for "." and ".." -- verify), and ENOENT if no
 * matching child exists.
 */
static int
devfs_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created dirs
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type != Ndir) {
			error = ENOTDIR;
			goto out;
		} else if (node->nchildren > 2) {
			error = ENOTEMPTY;
			goto out;
		} else {
			/* drop the namecache entry, then unlink the node */
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

/*
 * Remove a user-created, non-directory node (e.g. a symlink made via
 * nsymlink).  The parent's children are scanned by name; only nodes
 * flagged DEVFS_USER_CREATED may be removed.  Returns EPERM for
 * non-user-created nodes, EISDIR if the match is a directory, and
 * ENOENT if no matching child exists.
 */
static int
devfs_vop_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* removal only makes sense inside the root or a directory */
	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type == Ndir) {
			error = EISDIR;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


/*
 * Open a devfs special (device) vnode.  If the device supports
 * cloning, a per-open clone device and node may be created and the
 * vnode transparently replaced (orig_vp keeps the original for
 * cleanup).  The underlying driver open is performed via dev_dopen()
 * with the vnode unlocked.
 */
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (node
	    && ap->a_fp) {
		/*
		 * Cloning path: ask the driver for a per-open clone device.
		 * If one is produced, create a devfs node for it under the
		 * mount's root and swap the vnode we hand back to the caller.
		 */
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
					DEVFS_MNTDATA(vp->v_mount)->root_node,
					ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
						"parent here is: %s, node is: |%s|\n",
						((node->parent->node_type == Nroot) ?
						"ROOT!" : node->parent->d_dir.d_name),
						newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
						"test: %s\n",
						((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS,64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open underlying device.  The vnode is unlocked across the
	 * driver call and relocked afterwards.
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will be only allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 *
	 * NOTE(review): these returns fire after dev_dopen() has already
	 * succeeded; no dev_dclose() or cloned-vp cleanup is visible on
	 * these paths -- verify the device open count is not leaked here.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {

		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	/* give ttys without a t_stop handler a default no-op stop */
	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	/* disks get a VM object for buffered I/O */
	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	/* route further file ops through the devfs device fileops table */
	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}

	return 0;
}

/*
 * Close a devfs special (device) vnode.  Decides, based on
 * v_opencount / VRECLAIMED / D_TRACKCLOSE, whether the underlying
 * driver close (dev_dclose()) must actually be issued, and handles
 * the controlling-terminal half-close.
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}


/*
 * File-table close for devfs device files: detach the fileops and
 * close the underlying vnode.
 */
static int
devfs_fo_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
	      struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	/* zero-length reads complete trivially */
	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* hold the device across the driver call */
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	/* translate fcntl/file flags into kernel I/O flags */
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	/* track access time on the devfs node */
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}


/*
 * Device-optimized file table vnode write routine, the write-side
 * counterpart of devfs_fo_read(): bypasses the VOP table and calls
 * the driver via dev_dwrite().
 */
static int
devfs_fo_write(struct file *fp, struct uio *uio,
	       struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	/* NOTE(review): redundant re-read of fp->f_data; vp is unchanged */
	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* hold the device across the driver call */
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	/* translate fcntl/file flags into kernel I/O flags */
	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |=
sequential_heuristic(uio, fp); 126621864bc5SMatthew Dillon 126721864bc5SMatthew Dillon error = dev_dwrite(dev, uio, ioflag); 126821864bc5SMatthew Dillon 126921864bc5SMatthew Dillon release_dev(dev); 127007dfa375SAlex Hornung if (node) { 127107dfa375SAlex Hornung nanotime(&node->atime); 1272898c91eeSMatthew Dillon nanotime(&node->mtime); 127307dfa375SAlex Hornung } 127421864bc5SMatthew Dillon 127521864bc5SMatthew Dillon if ((flags & O_FOFFSET) == 0) 127621864bc5SMatthew Dillon fp->f_offset = uio->uio_offset; 127721864bc5SMatthew Dillon fp->f_nextoff = uio->uio_offset; 12783a1032a6SAlex Hornung 127921864bc5SMatthew Dillon return (error); 128021864bc5SMatthew Dillon } 128121864bc5SMatthew Dillon 128221864bc5SMatthew Dillon 128321864bc5SMatthew Dillon static int 12849f889dc4SMatthew Dillon devfs_fo_stat(struct file *fp, struct stat *sb, struct ucred *cred) 128521864bc5SMatthew Dillon { 128621864bc5SMatthew Dillon struct vnode *vp; 128721864bc5SMatthew Dillon struct vattr vattr; 128821864bc5SMatthew Dillon struct vattr *vap; 128921864bc5SMatthew Dillon u_short mode; 129021864bc5SMatthew Dillon cdev_t dev; 12913a1032a6SAlex Hornung int error; 12923a1032a6SAlex Hornung 12933a1032a6SAlex Hornung vp = (struct vnode *)fp->f_data; 12943a1032a6SAlex Hornung if (vp == NULL || vp->v_type == VBAD) 12953a1032a6SAlex Hornung return EBADF; 12963a1032a6SAlex Hornung 12973a1032a6SAlex Hornung error = vn_stat(vp, sb, cred); 12983a1032a6SAlex Hornung if (error) 12993a1032a6SAlex Hornung return (error); 130021864bc5SMatthew Dillon 130121864bc5SMatthew Dillon vap = &vattr; 130221864bc5SMatthew Dillon error = VOP_GETATTR(vp, vap); 13033a1032a6SAlex Hornung if (error) 130421864bc5SMatthew Dillon return (error); 130521864bc5SMatthew Dillon 130621864bc5SMatthew Dillon /* 130721864bc5SMatthew Dillon * Zero the spare stat fields 130821864bc5SMatthew Dillon */ 130921864bc5SMatthew Dillon sb->st_lspare = 0; 1310d98152a8SMatthew Dillon sb->st_qspare1 = 0; 1311d98152a8SMatthew Dillon sb->st_qspare2 
= 0; 131221864bc5SMatthew Dillon 131321864bc5SMatthew Dillon /* 131421864bc5SMatthew Dillon * Copy from vattr table ... or not in case it's a cloned device 131521864bc5SMatthew Dillon */ 131621864bc5SMatthew Dillon if (vap->va_fsid != VNOVAL) 131721864bc5SMatthew Dillon sb->st_dev = vap->va_fsid; 131821864bc5SMatthew Dillon else 131921864bc5SMatthew Dillon sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; 132021864bc5SMatthew Dillon 132121864bc5SMatthew Dillon sb->st_ino = vap->va_fileid; 132221864bc5SMatthew Dillon 132321864bc5SMatthew Dillon mode = vap->va_mode; 132421864bc5SMatthew Dillon mode |= S_IFCHR; 132521864bc5SMatthew Dillon sb->st_mode = mode; 132621864bc5SMatthew Dillon 132721864bc5SMatthew Dillon if (vap->va_nlink > (nlink_t)-1) 132821864bc5SMatthew Dillon sb->st_nlink = (nlink_t)-1; 132921864bc5SMatthew Dillon else 133021864bc5SMatthew Dillon sb->st_nlink = vap->va_nlink; 13313a1032a6SAlex Hornung 133221864bc5SMatthew Dillon sb->st_uid = vap->va_uid; 133321864bc5SMatthew Dillon sb->st_gid = vap->va_gid; 1334ca8d7677SMatthew Dillon sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev); 13352d076755SAlex Hornung sb->st_size = vap->va_bytes; 133621864bc5SMatthew Dillon sb->st_atimespec = vap->va_atime; 133721864bc5SMatthew Dillon sb->st_mtimespec = vap->va_mtime; 133821864bc5SMatthew Dillon sb->st_ctimespec = vap->va_ctime; 133921864bc5SMatthew Dillon 134021864bc5SMatthew Dillon /* 134121864bc5SMatthew Dillon * A VCHR and VBLK device may track the last access and last modified 134221864bc5SMatthew Dillon * time independantly of the filesystem. This is particularly true 134321864bc5SMatthew Dillon * because device read and write calls may bypass the filesystem. 
134421864bc5SMatthew Dillon */ 134521864bc5SMatthew Dillon if (vp->v_type == VCHR || vp->v_type == VBLK) { 134621864bc5SMatthew Dillon dev = vp->v_rdev; 134721864bc5SMatthew Dillon if (dev != NULL) { 134821864bc5SMatthew Dillon if (dev->si_lastread) { 1349cec73927SMatthew Dillon sb->st_atimespec.tv_sec = time_second + 1350cec73927SMatthew Dillon (time_uptime - 1351cec73927SMatthew Dillon dev->si_lastread); 135221864bc5SMatthew Dillon sb->st_atimespec.tv_nsec = 0; 135321864bc5SMatthew Dillon } 135421864bc5SMatthew Dillon if (dev->si_lastwrite) { 1355cec73927SMatthew Dillon sb->st_atimespec.tv_sec = time_second + 1356cec73927SMatthew Dillon (time_uptime - 1357cec73927SMatthew Dillon dev->si_lastwrite); 135821864bc5SMatthew Dillon sb->st_atimespec.tv_nsec = 0; 135921864bc5SMatthew Dillon } 136021864bc5SMatthew Dillon } 136121864bc5SMatthew Dillon } 136221864bc5SMatthew Dillon 136321864bc5SMatthew Dillon /* 136421864bc5SMatthew Dillon * According to www.opengroup.org, the meaning of st_blksize is 136521864bc5SMatthew Dillon * "a filesystem-specific preferred I/O block size for this 136621864bc5SMatthew Dillon * object. In some filesystem types, this may vary from file 136721864bc5SMatthew Dillon * to file" 136821864bc5SMatthew Dillon * Default to PAGE_SIZE after much discussion. 
136921864bc5SMatthew Dillon */ 137021864bc5SMatthew Dillon 137121864bc5SMatthew Dillon sb->st_blksize = PAGE_SIZE; 137221864bc5SMatthew Dillon 137321864bc5SMatthew Dillon sb->st_flags = vap->va_flags; 137421864bc5SMatthew Dillon 137521864bc5SMatthew Dillon error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0); 137621864bc5SMatthew Dillon if (error) 137721864bc5SMatthew Dillon sb->st_gen = 0; 137821864bc5SMatthew Dillon else 137921864bc5SMatthew Dillon sb->st_gen = (u_int32_t)vap->va_gen; 138021864bc5SMatthew Dillon 138121864bc5SMatthew Dillon sb->st_blocks = vap->va_bytes / S_BLKSIZE; 138221864bc5SMatthew Dillon 138321864bc5SMatthew Dillon return (0); 138421864bc5SMatthew Dillon } 138521864bc5SMatthew Dillon 138621864bc5SMatthew Dillon 138721864bc5SMatthew Dillon static int 13889f889dc4SMatthew Dillon devfs_fo_kqfilter(struct file *fp, struct knote *kn) 138921864bc5SMatthew Dillon { 139021864bc5SMatthew Dillon struct vnode *vp; 139121864bc5SMatthew Dillon int error; 139221864bc5SMatthew Dillon cdev_t dev; 139321864bc5SMatthew Dillon 139421864bc5SMatthew Dillon vp = (struct vnode *)fp->f_data; 139521864bc5SMatthew Dillon if (vp == NULL || vp->v_type == VBAD) { 139621864bc5SMatthew Dillon error = EBADF; 139721864bc5SMatthew Dillon goto done; 139821864bc5SMatthew Dillon } 139921864bc5SMatthew Dillon if ((dev = vp->v_rdev) == NULL) { 140021864bc5SMatthew Dillon error = EBADF; 140121864bc5SMatthew Dillon goto done; 140221864bc5SMatthew Dillon } 140321864bc5SMatthew Dillon reference_dev(dev); 140421864bc5SMatthew Dillon 140521864bc5SMatthew Dillon error = dev_dkqfilter(dev, kn); 140621864bc5SMatthew Dillon 140721864bc5SMatthew Dillon release_dev(dev); 140821864bc5SMatthew Dillon 140921864bc5SMatthew Dillon done: 1410b287d649SMatthew Dillon return (error); 141121864bc5SMatthew Dillon } 141221864bc5SMatthew Dillon 141321864bc5SMatthew Dillon /* 141421864bc5SMatthew Dillon * MPALMOSTSAFE - acquires mplock 141521864bc5SMatthew Dillon */ 141621864bc5SMatthew Dillon static int 
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
	       struct ucred *ucred, struct sysmsg *msg)
{
#if 0
	struct devfs_node *node;
#endif
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t	dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	/* hold the device across the ioctl; released at 'out' */
	reference_dev(dev);

#if 0
	node = DEVFS_NODE(vp);
#endif

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n",
		    dev->si_name);

	/*
	 * Generic devfs-level ioctls handled without calling into the
	 * driver: FIODTYPE returns the device type bits, FIODNAME copies
	 * out the device name (including the terminating NUL).
	 */
	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		/* EINVAL if the caller's buffer is too small for the name */
		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	/* everything else goes to the driver; pass the file's open flags */
	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif
	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	/*
	 * On a successful TIOCSCTTY make this vnode the session's
	 * controlling tty, swapping out any previous one.  vref() the
	 * new vnode before dropping the old reference so the session
	 * always holds a valid reference.
	 */
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}


static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	/* only disk devices buffer data that needs flushing */
	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
153021864bc5SMatthew Dillon */ 153121864bc5SMatthew Dillon error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL); 153221864bc5SMatthew Dillon return (error); 153321864bc5SMatthew Dillon } 153421864bc5SMatthew Dillon 153521864bc5SMatthew Dillon static int 153621864bc5SMatthew Dillon devfs_spec_read(struct vop_read_args *ap) 153721864bc5SMatthew Dillon { 1538898c91eeSMatthew Dillon struct devfs_node *node; 153921864bc5SMatthew Dillon struct vnode *vp; 154021864bc5SMatthew Dillon struct uio *uio; 154121864bc5SMatthew Dillon cdev_t dev; 154221864bc5SMatthew Dillon int error; 154321864bc5SMatthew Dillon 154421864bc5SMatthew Dillon vp = ap->a_vp; 154521864bc5SMatthew Dillon dev = vp->v_rdev; 154621864bc5SMatthew Dillon uio = ap->a_uio; 1547898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 154821864bc5SMatthew Dillon 154921864bc5SMatthew Dillon if (dev == NULL) /* device was revoked */ 155021864bc5SMatthew Dillon return (EBADF); 155121864bc5SMatthew Dillon if (uio->uio_resid == 0) 155221864bc5SMatthew Dillon return (0); 155321864bc5SMatthew Dillon 155421864bc5SMatthew Dillon vn_unlock(vp); 155521864bc5SMatthew Dillon error = dev_dread(dev, uio, ap->a_ioflag); 155621864bc5SMatthew Dillon vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 155721864bc5SMatthew Dillon 1558898c91eeSMatthew Dillon if (node) 1559898c91eeSMatthew Dillon nanotime(&node->atime); 156021864bc5SMatthew Dillon 156121864bc5SMatthew Dillon return (error); 156221864bc5SMatthew Dillon } 156321864bc5SMatthew Dillon 156421864bc5SMatthew Dillon /* 156521864bc5SMatthew Dillon * Vnode op for write 156621864bc5SMatthew Dillon * 156721864bc5SMatthew Dillon * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag, 156821864bc5SMatthew Dillon * struct ucred *a_cred) 156921864bc5SMatthew Dillon */ 157021864bc5SMatthew Dillon static int 157121864bc5SMatthew Dillon devfs_spec_write(struct vop_write_args *ap) 157221864bc5SMatthew Dillon { 1573898c91eeSMatthew Dillon struct devfs_node *node; 157421864bc5SMatthew Dillon struct 
vnode *vp; 157521864bc5SMatthew Dillon struct uio *uio; 157621864bc5SMatthew Dillon cdev_t dev; 157721864bc5SMatthew Dillon int error; 157821864bc5SMatthew Dillon 157921864bc5SMatthew Dillon vp = ap->a_vp; 158021864bc5SMatthew Dillon dev = vp->v_rdev; 158121864bc5SMatthew Dillon uio = ap->a_uio; 1582898c91eeSMatthew Dillon node = DEVFS_NODE(vp); 158321864bc5SMatthew Dillon 158421864bc5SMatthew Dillon KKASSERT(uio->uio_segflg != UIO_NOCOPY); 158521864bc5SMatthew Dillon 158621864bc5SMatthew Dillon if (dev == NULL) /* device was revoked */ 158721864bc5SMatthew Dillon return (EBADF); 158821864bc5SMatthew Dillon 158921864bc5SMatthew Dillon vn_unlock(vp); 159021864bc5SMatthew Dillon error = dev_dwrite(dev, uio, ap->a_ioflag); 159121864bc5SMatthew Dillon vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 159221864bc5SMatthew Dillon 159307dfa375SAlex Hornung if (node) { 159407dfa375SAlex Hornung nanotime(&node->atime); 1595898c91eeSMatthew Dillon nanotime(&node->mtime); 159607dfa375SAlex Hornung } 159721864bc5SMatthew Dillon 159821864bc5SMatthew Dillon return (error); 159921864bc5SMatthew Dillon } 160021864bc5SMatthew Dillon 160121864bc5SMatthew Dillon /* 160221864bc5SMatthew Dillon * Device ioctl operation. 
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
#if 0
	struct devfs_node *node;
#endif
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
#if 0
	/* timestamp updates intentionally compiled out at this layer */
	node = DEVFS_NODE(vp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
#if 0
	struct devfs_node *node;
#endif
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked (EBADF) */
#if 0
	/* timestamp update intentionally compiled out at this layer */
	node = DEVFS_NODE(vp);

	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	/* give dependency trackers (e.g. softupdates) a shot at writes */
	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 * nbp shares bp's data pages; bio_caller_info1 links back to the
	 * original bio so the completion callback can continue the chain.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer.  The chunk size is the largest
	 * multiple of the device's physical block size that fits in
	 * maxiosize.
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}

/*
 * Chunked up transfer completion routine - chain transfers until done
 *
 * NOTE: MPSAFE callback.
 *
 * Exactly one of the branches below either terminates the chain
 * (falling through to the biodone/cleanup code at the bottom) or
 * dispatches the next chunk and returns early.
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;		/* progress so far */

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propogate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
	} else {
		/*
		 * Continue the chain: advance the window by the chunk just
		 * completed, clamp the next chunk to chunksize, and reissue
		 * nbp against the device.
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
		return;
	}

	/*
	 * Fall through to here on termination.  biodone(bp) and
	 * clean up and free nbp.
	 */
	biodone(bio);
	BUF_UNLOCK(nbp);
	uninitbufbio(nbp);
	kfree(nbp, M_DEVBUF);
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 *
 * Inform the device that a range of blocks is no longer needed, by
 * issuing a BUF_CMD_FREEBLKS strategy request.  Silently a no-op for
 * devices that did not advertise SI_CANFREE.
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((ap->a_vp->v_rdev->si_flags & SI_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		/* backwards run is clipped to the start of the device */
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}


/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

/*
 * NOTE: MPSAFE callback.
 */
static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	/*
	 * BIO completion callback for devfs_spec_getpages(): mark the
	 * pbuf as done and wake the thread tsleep()ing on it below.
	 */
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 *
 * Reads ap->a_count bytes at the offset implied by page ap->a_m[0] into
 * the supplied page array via a pbuf and vn_strategy(), then fixes up the
 * valid/dirty state of every page.  Returns VM_PAGER_OK if the requested
 * page (ap->a_reqpage) was made at least partially valid, VM_PAGER_ERROR
 * otherwise.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf_kva(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bsetrunningbufspace(bp, size);

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/*
	 * Wait for the I/O to complete.  The crit section interlocks the
	 * b_cmd test against the BUF_CMD_DONE store + wakeup() performed by
	 * devfs_spec_getpages_iodone() (historically this was splbio
	 * protection).
	 */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		/* Page now holds device data (or explicit zero-fill). */
		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			/* Page fully covered by the read. */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			/* Page entirely beyond the data actually read. */
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_REFERENCED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
			    vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    " size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    " nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	/*
	 * NOTE(review): this is a page-in (read) path, yet mtime rather
	 * than atime is updated here — confirm this is intentional.
	 */
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}

/*
 * Detect sequential access and bump fp->f_seqcount accordingly.
 *
 * Returns a read-ahead hint (f_seqcount << IO_SEQSHIFT) when the current
 * uio offset continues the previous transfer (or is a fresh offset-0 access
 * with history), otherwise decays f_seqcount and returns 0.
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}