xref: /dragonfly/sys/vfs/devfs/devfs_vnops.c (revision c512ab96)
121864bc5SMatthew Dillon /*
221864bc5SMatthew Dillon  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
321864bc5SMatthew Dillon  *
421864bc5SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
521864bc5SMatthew Dillon  * by Alex Hornung <ahornung@gmail.com>
621864bc5SMatthew Dillon  *
721864bc5SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
821864bc5SMatthew Dillon  * modification, are permitted provided that the following conditions
921864bc5SMatthew Dillon  * are met:
1021864bc5SMatthew Dillon  *
1121864bc5SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
1221864bc5SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
1321864bc5SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
1421864bc5SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in
1521864bc5SMatthew Dillon  *    the documentation and/or other materials provided with the
1621864bc5SMatthew Dillon  *    distribution.
1721864bc5SMatthew Dillon  * 3. Neither the name of The DragonFly Project nor the names of its
1821864bc5SMatthew Dillon  *    contributors may be used to endorse or promote products derived
1921864bc5SMatthew Dillon  *    from this software without specific, prior written permission.
2021864bc5SMatthew Dillon  *
2121864bc5SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2221864bc5SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2321864bc5SMatthew Dillon  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
2421864bc5SMatthew Dillon  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
2521864bc5SMatthew Dillon  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
2621864bc5SMatthew Dillon  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
2721864bc5SMatthew Dillon  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2821864bc5SMatthew Dillon  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
2921864bc5SMatthew Dillon  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
3021864bc5SMatthew Dillon  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
3121864bc5SMatthew Dillon  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3221864bc5SMatthew Dillon  * SUCH DAMAGE.
3321864bc5SMatthew Dillon  */
3421864bc5SMatthew Dillon #include <sys/param.h>
3521864bc5SMatthew Dillon #include <sys/systm.h>
3621864bc5SMatthew Dillon #include <sys/time.h>
3721864bc5SMatthew Dillon #include <sys/kernel.h>
3821864bc5SMatthew Dillon #include <sys/lock.h>
3921864bc5SMatthew Dillon #include <sys/fcntl.h>
4021864bc5SMatthew Dillon #include <sys/proc.h>
4121864bc5SMatthew Dillon #include <sys/priv.h>
4221864bc5SMatthew Dillon #include <sys/signalvar.h>
4321864bc5SMatthew Dillon #include <sys/vnode.h>
4421864bc5SMatthew Dillon #include <sys/uio.h>
4521864bc5SMatthew Dillon #include <sys/mount.h>
4621864bc5SMatthew Dillon #include <sys/file.h>
4721864bc5SMatthew Dillon #include <sys/fcntl.h>
4821864bc5SMatthew Dillon #include <sys/namei.h>
4921864bc5SMatthew Dillon #include <sys/dirent.h>
5021864bc5SMatthew Dillon #include <sys/malloc.h>
5121864bc5SMatthew Dillon #include <sys/stat.h>
5221864bc5SMatthew Dillon #include <sys/reg.h>
5321864bc5SMatthew Dillon #include <vm/vm_pager.h>
5421864bc5SMatthew Dillon #include <vm/vm_zone.h>
5521864bc5SMatthew Dillon #include <vm/vm_object.h>
5621864bc5SMatthew Dillon #include <sys/filio.h>
5721864bc5SMatthew Dillon #include <sys/ttycom.h>
5821864bc5SMatthew Dillon #include <sys/tty.h>
592c1e28ddSAlex Hornung #include <sys/devfs.h>
6021864bc5SMatthew Dillon #include <sys/pioctl.h>
6121864bc5SMatthew Dillon 
6221864bc5SMatthew Dillon #include <machine/limits.h>
631a54183bSMatthew Dillon #include <vm/vm_page2.h>
641a54183bSMatthew Dillon #include <sys/buf2.h>
651a54183bSMatthew Dillon #include <sys/sysref2.h>
6621864bc5SMatthew Dillon 
6721864bc5SMatthew Dillon MALLOC_DECLARE(M_DEVFS);
6821864bc5SMatthew Dillon #define DEVFS_BADOP	(void *)devfs_badop
6921864bc5SMatthew Dillon 
7021864bc5SMatthew Dillon static int devfs_badop(struct vop_generic_args *);
7121864bc5SMatthew Dillon static int devfs_access(struct vop_access_args *);
7221864bc5SMatthew Dillon static int devfs_inactive(struct vop_inactive_args *);
7321864bc5SMatthew Dillon static int devfs_reclaim(struct vop_reclaim_args *);
7421864bc5SMatthew Dillon static int devfs_readdir(struct vop_readdir_args *);
7521864bc5SMatthew Dillon static int devfs_getattr(struct vop_getattr_args *);
7621864bc5SMatthew Dillon static int devfs_setattr(struct vop_setattr_args *);
7721864bc5SMatthew Dillon static int devfs_readlink(struct vop_readlink_args *);
7821864bc5SMatthew Dillon static int devfs_print(struct vop_print_args *);
7921864bc5SMatthew Dillon 
8021864bc5SMatthew Dillon static int devfs_nresolve(struct vop_nresolve_args *);
8121864bc5SMatthew Dillon static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
8221864bc5SMatthew Dillon static int devfs_nsymlink(struct vop_nsymlink_args *);
8321864bc5SMatthew Dillon static int devfs_nremove(struct vop_nremove_args *);
8421864bc5SMatthew Dillon 
8521864bc5SMatthew Dillon static int devfs_spec_open(struct vop_open_args *);
8621864bc5SMatthew Dillon static int devfs_spec_close(struct vop_close_args *);
8721864bc5SMatthew Dillon static int devfs_spec_fsync(struct vop_fsync_args *);
8821864bc5SMatthew Dillon 
8921864bc5SMatthew Dillon static int devfs_spec_read(struct vop_read_args *);
9021864bc5SMatthew Dillon static int devfs_spec_write(struct vop_write_args *);
9121864bc5SMatthew Dillon static int devfs_spec_ioctl(struct vop_ioctl_args *);
9221864bc5SMatthew Dillon static int devfs_spec_poll(struct vop_poll_args *);
9321864bc5SMatthew Dillon static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
9421864bc5SMatthew Dillon static int devfs_spec_strategy(struct vop_strategy_args *);
9521864bc5SMatthew Dillon static void devfs_spec_strategy_done(struct bio *);
9621864bc5SMatthew Dillon static int devfs_spec_freeblks(struct vop_freeblks_args *);
9721864bc5SMatthew Dillon static int devfs_spec_bmap(struct vop_bmap_args *);
9821864bc5SMatthew Dillon static int devfs_spec_advlock(struct vop_advlock_args *);
9921864bc5SMatthew Dillon static void devfs_spec_getpages_iodone(struct bio *);
10021864bc5SMatthew Dillon static int devfs_spec_getpages(struct vop_getpages_args *);
10121864bc5SMatthew Dillon 
10221864bc5SMatthew Dillon 
10321864bc5SMatthew Dillon static int devfs_specf_close(struct file *);
10421864bc5SMatthew Dillon static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
10521864bc5SMatthew Dillon static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
10621864bc5SMatthew Dillon static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
10721864bc5SMatthew Dillon static int devfs_specf_kqfilter(struct file *, struct knote *);
10821864bc5SMatthew Dillon static int devfs_specf_poll(struct file *, int, struct ucred *);
10987baaf0cSMatthew Dillon static int devfs_specf_ioctl(struct file *, u_long, caddr_t,
11087baaf0cSMatthew Dillon 				struct ucred *, struct sysmsg *);
11121864bc5SMatthew Dillon static __inline int sequential_heuristic(struct uio *, struct file *);
11287baaf0cSMatthew Dillon 
11321864bc5SMatthew Dillon extern struct lock devfs_lock;
11421864bc5SMatthew Dillon 
11521864bc5SMatthew Dillon /*
11621864bc5SMatthew Dillon  * devfs vnode operations for regular files
11721864bc5SMatthew Dillon  */
11821864bc5SMatthew Dillon struct vop_ops devfs_vnode_norm_vops = {
11921864bc5SMatthew Dillon 	.vop_default =		vop_defaultop,
12021864bc5SMatthew Dillon 	.vop_access =		devfs_access,
12121864bc5SMatthew Dillon 	.vop_advlock =		DEVFS_BADOP,
12221864bc5SMatthew Dillon 	.vop_bmap =			DEVFS_BADOP,
12321864bc5SMatthew Dillon 	.vop_close =		vop_stdclose,
12421864bc5SMatthew Dillon 	.vop_getattr =		devfs_getattr,
12521864bc5SMatthew Dillon 	.vop_inactive =		devfs_inactive,
12621864bc5SMatthew Dillon 	.vop_ncreate =		DEVFS_BADOP,
12721864bc5SMatthew Dillon 	.vop_nresolve =		devfs_nresolve,
12821864bc5SMatthew Dillon 	.vop_nlookupdotdot =	devfs_nlookupdotdot,
12921864bc5SMatthew Dillon 	.vop_nlink =		DEVFS_BADOP,
13021864bc5SMatthew Dillon 	.vop_nmkdir =		DEVFS_BADOP,
13121864bc5SMatthew Dillon 	.vop_nmknod =		DEVFS_BADOP,
13221864bc5SMatthew Dillon 	.vop_nremove =		devfs_nremove,
13321864bc5SMatthew Dillon 	.vop_nrename =		DEVFS_BADOP,
13421864bc5SMatthew Dillon 	.vop_nrmdir =		DEVFS_BADOP,
13521864bc5SMatthew Dillon 	.vop_nsymlink =		devfs_nsymlink,
13621864bc5SMatthew Dillon 	.vop_open =			vop_stdopen,
13721864bc5SMatthew Dillon 	.vop_pathconf =		vop_stdpathconf,
13821864bc5SMatthew Dillon 	.vop_print =		devfs_print,
13921864bc5SMatthew Dillon 	.vop_read =			DEVFS_BADOP,
14021864bc5SMatthew Dillon 	.vop_readdir =		devfs_readdir,
14121864bc5SMatthew Dillon 	.vop_readlink =		devfs_readlink,
14221864bc5SMatthew Dillon 	.vop_reclaim =		devfs_reclaim,
14321864bc5SMatthew Dillon 	.vop_setattr =		devfs_setattr,
14421864bc5SMatthew Dillon 	.vop_write =		DEVFS_BADOP,
14521864bc5SMatthew Dillon 	.vop_ioctl =		DEVFS_BADOP
14621864bc5SMatthew Dillon };
14721864bc5SMatthew Dillon 
14821864bc5SMatthew Dillon /*
14921864bc5SMatthew Dillon  * devfs vnode operations for character devices
15021864bc5SMatthew Dillon  */
15121864bc5SMatthew Dillon struct vop_ops devfs_vnode_dev_vops = {
15221864bc5SMatthew Dillon 	.vop_default =		vop_defaultop,
15321864bc5SMatthew Dillon 	.vop_access =		devfs_access,
15421864bc5SMatthew Dillon 	.vop_advlock =		devfs_spec_advlock,
15521864bc5SMatthew Dillon 	.vop_bmap =			devfs_spec_bmap,
15621864bc5SMatthew Dillon 	.vop_close =		devfs_spec_close,
15721864bc5SMatthew Dillon 	.vop_freeblks =		devfs_spec_freeblks,
15821864bc5SMatthew Dillon 	.vop_fsync =		devfs_spec_fsync,
15921864bc5SMatthew Dillon 	.vop_getattr =		devfs_getattr,
16021864bc5SMatthew Dillon 	.vop_getpages =		devfs_spec_getpages,
16121864bc5SMatthew Dillon 	.vop_inactive =		devfs_inactive,
16221864bc5SMatthew Dillon 	.vop_open =			devfs_spec_open,
16321864bc5SMatthew Dillon 	.vop_pathconf =		vop_stdpathconf,
16421864bc5SMatthew Dillon 	.vop_print =		devfs_print,
16521864bc5SMatthew Dillon 	.vop_poll =			devfs_spec_poll,
16621864bc5SMatthew Dillon 	.vop_kqfilter =		devfs_spec_kqfilter,
16721864bc5SMatthew Dillon 	.vop_read =			devfs_spec_read,
16821864bc5SMatthew Dillon 	.vop_readdir =		DEVFS_BADOP,
16921864bc5SMatthew Dillon 	.vop_readlink =		DEVFS_BADOP,
17021864bc5SMatthew Dillon 	.vop_reclaim =		devfs_reclaim,
17121864bc5SMatthew Dillon 	.vop_setattr =		devfs_setattr,
17221864bc5SMatthew Dillon 	.vop_strategy =		devfs_spec_strategy,
17321864bc5SMatthew Dillon 	.vop_write =		devfs_spec_write,
17421864bc5SMatthew Dillon 	.vop_ioctl =		devfs_spec_ioctl
17521864bc5SMatthew Dillon };
17621864bc5SMatthew Dillon 
17721864bc5SMatthew Dillon struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;
17821864bc5SMatthew Dillon 
17921864bc5SMatthew Dillon struct fileops devfs_dev_fileops = {
18021864bc5SMatthew Dillon 	.fo_read = devfs_specf_read,
18121864bc5SMatthew Dillon 	.fo_write = devfs_specf_write,
18221864bc5SMatthew Dillon 	.fo_ioctl = devfs_specf_ioctl,
18321864bc5SMatthew Dillon 	.fo_poll = devfs_specf_poll,
18421864bc5SMatthew Dillon 	.fo_kqfilter = devfs_specf_kqfilter,
18521864bc5SMatthew Dillon 	.fo_stat = devfs_specf_stat,
18621864bc5SMatthew Dillon 	.fo_close = devfs_specf_close,
18721864bc5SMatthew Dillon 	.fo_shutdown = nofo_shutdown
18821864bc5SMatthew Dillon };
18921864bc5SMatthew Dillon 
1904062d050SMatthew Dillon /*
1914062d050SMatthew Dillon  * These two functions are possibly temporary hacks for
1924062d050SMatthew Dillon  * devices (aka the pty code) which want to control the
1934062d050SMatthew Dillon  * node attributes themselves.
1944062d050SMatthew Dillon  *
1954062d050SMatthew Dillon  * XXX we may ultimately desire to simply remove the uid/gid/mode
1964062d050SMatthew Dillon  * from the node entirely.
1974062d050SMatthew Dillon  */
1984062d050SMatthew Dillon static __inline void
1994062d050SMatthew Dillon node_sync_dev_get(struct devfs_node *node)
2004062d050SMatthew Dillon {
2014062d050SMatthew Dillon 	cdev_t dev;
2024062d050SMatthew Dillon 
2034062d050SMatthew Dillon 	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
2044062d050SMatthew Dillon 		node->uid = dev->si_uid;
2054062d050SMatthew Dillon 		node->gid = dev->si_gid;
2064062d050SMatthew Dillon 		node->mode = dev->si_perms;
2074062d050SMatthew Dillon 	}
2084062d050SMatthew Dillon }
2094062d050SMatthew Dillon 
2104062d050SMatthew Dillon static __inline void
2114062d050SMatthew Dillon node_sync_dev_set(struct devfs_node *node)
2124062d050SMatthew Dillon {
2134062d050SMatthew Dillon 	cdev_t dev;
2144062d050SMatthew Dillon 
2154062d050SMatthew Dillon 	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
2164062d050SMatthew Dillon 		dev->si_uid = node->uid;
2174062d050SMatthew Dillon 		dev->si_gid = node->gid;
2184062d050SMatthew Dillon 		dev->si_perms = node->mode;
2194062d050SMatthew Dillon 	}
2204062d050SMatthew Dillon }
22121864bc5SMatthew Dillon 
22221864bc5SMatthew Dillon /*
22321864bc5SMatthew Dillon  * generic entry point for unsupported operations
22421864bc5SMatthew Dillon  */
22521864bc5SMatthew Dillon static int
22621864bc5SMatthew Dillon devfs_badop(struct vop_generic_args *ap)
22721864bc5SMatthew Dillon {
22821864bc5SMatthew Dillon 	return (EIO);
22921864bc5SMatthew Dillon }
23021864bc5SMatthew Dillon 
23121864bc5SMatthew Dillon 
23221864bc5SMatthew Dillon static int
23321864bc5SMatthew Dillon devfs_access(struct vop_access_args *ap)
23421864bc5SMatthew Dillon {
23521864bc5SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
236898c91eeSMatthew Dillon 	int error;
23721864bc5SMatthew Dillon 
238894bbb25SAlex Hornung 	if (!devfs_node_is_accessible(node))
239894bbb25SAlex Hornung 		return ENOENT;
2404062d050SMatthew Dillon 	node_sync_dev_get(node);
24121864bc5SMatthew Dillon 	error = vop_helper_access(ap, node->uid, node->gid,
24221864bc5SMatthew Dillon 				  node->mode, node->flags);
24321864bc5SMatthew Dillon 
24421864bc5SMatthew Dillon 	return error;
24521864bc5SMatthew Dillon }
24621864bc5SMatthew Dillon 
24721864bc5SMatthew Dillon 
24821864bc5SMatthew Dillon static int
24921864bc5SMatthew Dillon devfs_inactive(struct vop_inactive_args *ap)
25021864bc5SMatthew Dillon {
251ca8d7677SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
25221864bc5SMatthew Dillon 
253ca8d7677SMatthew Dillon 	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
25421864bc5SMatthew Dillon 		vrecycle(ap->a_vp);
25521864bc5SMatthew Dillon 	return 0;
25621864bc5SMatthew Dillon }
25721864bc5SMatthew Dillon 
25821864bc5SMatthew Dillon 
25921864bc5SMatthew Dillon static int
26021864bc5SMatthew Dillon devfs_reclaim(struct vop_reclaim_args *ap)
26121864bc5SMatthew Dillon {
262be6f2e86SMatthew Dillon 	struct devfs_node *node;
263be6f2e86SMatthew Dillon 	struct vnode *vp;
264be6f2e86SMatthew Dillon 	int locked;
265be6f2e86SMatthew Dillon 
266be6f2e86SMatthew Dillon 	/*
267be6f2e86SMatthew Dillon 	 * Check if it is locked already. if not, we acquire the devfs lock
268be6f2e86SMatthew Dillon 	 */
26921864bc5SMatthew Dillon 	if (!(lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE) {
27021864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_EXCLUSIVE);
27121864bc5SMatthew Dillon 		locked = 1;
272be6f2e86SMatthew Dillon 	} else {
273be6f2e86SMatthew Dillon 		locked = 0;
27421864bc5SMatthew Dillon 	}
27521864bc5SMatthew Dillon 
276be6f2e86SMatthew Dillon 	/*
277be6f2e86SMatthew Dillon 	 * Get rid of the devfs_node if it is no longer linked into the
278be6f2e86SMatthew Dillon 	 * topology.
279be6f2e86SMatthew Dillon 	 */
280be6f2e86SMatthew Dillon 	vp = ap->a_vp;
281be6f2e86SMatthew Dillon 	if ((node = DEVFS_NODE(vp)) != NULL) {
282be6f2e86SMatthew Dillon 		node->v_node = NULL;
2834062d050SMatthew Dillon 		if ((node->flags & DEVFS_NODE_LINKED) == 0)
2844062d050SMatthew Dillon 			devfs_freep(node);
28521864bc5SMatthew Dillon 	}
28621864bc5SMatthew Dillon 
28721864bc5SMatthew Dillon 	if (locked)
28821864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_RELEASE);
28921864bc5SMatthew Dillon 
290be6f2e86SMatthew Dillon 	/*
2919b823501SAlex Hornung 	 * v_rdev needs to be properly released using v_release_rdev
2929b823501SAlex Hornung 	 * Make sure v_data is NULL as well.
293be6f2e86SMatthew Dillon 	 */
294be6f2e86SMatthew Dillon 	vp->v_data = NULL;
2959b823501SAlex Hornung 	v_release_rdev(vp);
29621864bc5SMatthew Dillon 	return 0;
29721864bc5SMatthew Dillon }
29821864bc5SMatthew Dillon 
29921864bc5SMatthew Dillon 
/*
 * Read directory entries from a devfs directory.
 *
 * Offsets 0 and 1 are the synthetic "." and ".." entries; all other
 * offsets map onto the per-node 'cookie' values assigned when children
 * were linked in, so the child list is scanned for nodes whose cookie
 * is >= the current offset.  Hidden/invisible nodes, and links whose
 * targets are hidden, are skipped.  When the caller asks for cookies
 * (ap->a_ncookies != NULL) an array of the emitted offsets is returned.
 *
 * Locking: the vnode lock is taken first, then devfs_lock nests
 * inside it; both are dropped before returning.
 */
static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	/* Reject offsets that cannot correspond to a valid cookie. */
	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		/*
		 * NOTE(review): always allocates the maximum 256 slots even
		 * when ncookies is smaller -- presumably to keep the sizing
		 * simple; confirm before changing.
		 */
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	/* Reading the directory counts as an access. */
	nanotime(&dnode->atime);

	/* Offset 0: synthesize the "." entry. */
	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	/* Offset 1: synthesize ".." (self-referential at the root). */
	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	/* Walk the children, resuming at the first cookie >= saveoff. */
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make sure that the
		 * target isn't hidden. If it is, we don't show the link in the
		 * directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
			(node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		/* Stop when the uio buffer is full (or on error). */
		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	/* Publish the resume offset and hand any cookies to the caller. */
	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		/* Nothing was emitted: free the cookie array on error. */
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		/* Caller takes ownership of the cookie array. */
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
43221864bc5SMatthew Dillon 
43321864bc5SMatthew Dillon 
/*
 * Resolve a name within a devfs directory.
 *
 * Performs a linear scan of the directory's child list for an exact
 * name match.  Link nodes are chased through to their targets, with a
 * depth limit of 8 to guard against recursive links.  On success the
 * resolved vnode is entered into the namecache (unlocked); on failure
 * a negative entry is cached and ENOENT is returned.
 */
static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int hidden = 0;	/* NOTE(review): set below but otherwise unused */
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* Only directories (and the root) can be resolved into. */
	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	/* Linear scan for an exact (length + bytes) name match. */
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		/* Chase link nodes to their targets, bounded at depth 8. */
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		/* Hidden targets resolve as if they did not exist. */
		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
		else
			hidden = 1;
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;

	}
	KKASSERT(vp);
	/* Namecache wants the vnode unlocked; drop our allocv reference. */
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
50221864bc5SMatthew Dillon 
50321864bc5SMatthew Dillon 
50421864bc5SMatthew Dillon static int
50521864bc5SMatthew Dillon devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
50621864bc5SMatthew Dillon {
507898c91eeSMatthew Dillon 	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
50821864bc5SMatthew Dillon 
509898c91eeSMatthew Dillon 	*ap->a_vpp = NULL;
510898c91eeSMatthew Dillon 	if (!devfs_node_is_accessible(dnode))
511894bbb25SAlex Hornung 		return ENOENT;
512894bbb25SAlex Hornung 
51321864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
514898c91eeSMatthew Dillon 	if (dnode->parent != NULL) {
515898c91eeSMatthew Dillon 		devfs_allocv(ap->a_vpp, dnode->parent);
51621864bc5SMatthew Dillon 		vn_unlock(*ap->a_vpp);
51721864bc5SMatthew Dillon 	}
51821864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
51921864bc5SMatthew Dillon 
52021864bc5SMatthew Dillon 	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
52121864bc5SMatthew Dillon }
52221864bc5SMatthew Dillon 
52321864bc5SMatthew Dillon 
/*
 * Return attributes for a devfs node.
 *
 * All attributes are synthesized from the devfs_node (after syncing
 * from an overriding device): size is the node structure size, or the
 * link length for softlinks; nlink is always 1; rminor is filled in
 * from the device for device nodes.  Always returns 0.
 */
static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;	/* never set below; kept for symmetry with setattr */

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	/* Device may own uid/gid/mode (SI_OVERRIDE); refresh first. */
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ;
	vap->va_flags = 0; /* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	/* Device nodes report the backing device's minor number. */
	if ((node->node_type == Pdev) && node->d_dev)  {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_size = node->symlink_namelen;
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}
57821864bc5SMatthew Dillon 
57921864bc5SMatthew Dillon 
58021864bc5SMatthew Dillon static int
58121864bc5SMatthew Dillon devfs_setattr(struct vop_setattr_args *ap)
58221864bc5SMatthew Dillon {
583898c91eeSMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
58421864bc5SMatthew Dillon 	struct vattr *vap;
58521864bc5SMatthew Dillon 	int error = 0;
58621864bc5SMatthew Dillon 
587894bbb25SAlex Hornung 	if (!devfs_node_is_accessible(node))
588ca8d7677SMatthew Dillon 		return ENOENT;
5894062d050SMatthew Dillon 	node_sync_dev_get(node);
59021864bc5SMatthew Dillon 
59121864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
59221864bc5SMatthew Dillon 
59321864bc5SMatthew Dillon 	vap = ap->a_vap;
59421864bc5SMatthew Dillon 
59521864bc5SMatthew Dillon 	if (vap->va_uid != (uid_t)VNOVAL) {
59621864bc5SMatthew Dillon 		if ((ap->a_cred->cr_uid != node->uid) &&
59721864bc5SMatthew Dillon 		    (!groupmember(node->gid, ap->a_cred))) {
59821864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_CHOWN);
599898c91eeSMatthew Dillon 			if (error)
60021864bc5SMatthew Dillon 				goto out;
60121864bc5SMatthew Dillon 		}
60221864bc5SMatthew Dillon 		node->uid = vap->va_uid;
60321864bc5SMatthew Dillon 	}
60421864bc5SMatthew Dillon 
60521864bc5SMatthew Dillon 	if (vap->va_gid != (uid_t)VNOVAL) {
60621864bc5SMatthew Dillon 		if ((ap->a_cred->cr_uid != node->uid) &&
60721864bc5SMatthew Dillon 		    (!groupmember(node->gid, ap->a_cred))) {
60821864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_CHOWN);
609898c91eeSMatthew Dillon 			if (error)
61021864bc5SMatthew Dillon 				goto out;
61121864bc5SMatthew Dillon 		}
61221864bc5SMatthew Dillon 		node->gid = vap->va_gid;
61321864bc5SMatthew Dillon 	}
61421864bc5SMatthew Dillon 
61521864bc5SMatthew Dillon 	if (vap->va_mode != (mode_t)VNOVAL) {
61621864bc5SMatthew Dillon 		if (ap->a_cred->cr_uid != node->uid) {
61721864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_ADMIN);
618898c91eeSMatthew Dillon 			if (error)
61921864bc5SMatthew Dillon 				goto out;
62021864bc5SMatthew Dillon 		}
62121864bc5SMatthew Dillon 		node->mode = vap->va_mode;
62221864bc5SMatthew Dillon 	}
62321864bc5SMatthew Dillon 
62421864bc5SMatthew Dillon out:
6254062d050SMatthew Dillon 	node_sync_dev_set(node);
62607dfa375SAlex Hornung 	nanotime(&node->ctime);
62721864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
628898c91eeSMatthew Dillon 
62921864bc5SMatthew Dillon 	return error;
63021864bc5SMatthew Dillon }
63121864bc5SMatthew Dillon 
63221864bc5SMatthew Dillon 
63321864bc5SMatthew Dillon static int
63421864bc5SMatthew Dillon devfs_readlink(struct vop_readlink_args *ap)
63521864bc5SMatthew Dillon {
63621864bc5SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
637ca8d7677SMatthew Dillon 	int ret;
638ca8d7677SMatthew Dillon 
639894bbb25SAlex Hornung 	if (!devfs_node_is_accessible(node))
640ca8d7677SMatthew Dillon 		return ENOENT;
64121864bc5SMatthew Dillon 
642ca8d7677SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
643ca8d7677SMatthew Dillon 	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
644ca8d7677SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
645ca8d7677SMatthew Dillon 
646ca8d7677SMatthew Dillon 	return ret;
64721864bc5SMatthew Dillon }
64821864bc5SMatthew Dillon 
64921864bc5SMatthew Dillon 
/*
 * devfs_print: VOP_PRINT stub -- nothing to report for devfs vnodes.
 */
static int
devfs_print(struct vop_print_args *ap)
{
	return 0;
}
65521864bc5SMatthew Dillon 
65621864bc5SMatthew Dillon 
/*
 * devfs_nsymlink: create a user symlink in a devfs directory.
 *
 * Allocates a Plink node holding a private, NUL-terminated copy of the
 * target path and marks it DEVFS_USER_CREATED so it may later be
 * removed via devfs_nremove().  Returns ENOTDIR when no vnode was
 * produced (e.g. the parent is not a directory), 0 on success.
 */
static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	/* symlinks may only be created inside a directory node */
	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		/* private copy of the link target, NUL-terminated */
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		/* bind the freshly resolved name to the new vnode */
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	/*
	 * NOTE(review): the early goto relies on *ap->a_vpp being NULL
	 * on entry so that ENOTDIR is returned -- confirm the VFS layer
	 * guarantees this initialization.
	 */
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
69121864bc5SMatthew Dillon 
69221864bc5SMatthew Dillon 
/*
 * devfs_nremove: remove a name from a devfs directory.
 *
 * Only nodes created from userland (DEVFS_USER_CREATED, i.e. symlinks
 * made via devfs_nsymlink) may be removed; attempting to remove a
 * driver-created device node yields EPERM.  ENOENT is returned when
 * the name is not found or the directory is inaccessible.
 */
static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* removal only makes sense inside a directory node */
	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	/* linear scan of the directory for an exact name+length match */
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	/*
	 * Invalidate the namecache entry even if the node was not found
	 * (EPERM bails out above without touching the cache).
	 */
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
73921864bc5SMatthew Dillon 
74021864bc5SMatthew Dillon 
/*
 * devfs_spec_open: VOP_OPEN for devfs-backed special files.
 *
 * If the driver supports cloning (devfs_clone() returns a new cdev),
 * a fresh device node and vnode are created and substituted into
 * ap->a_vp; orig_vp remembers the original vnode so it can be restored
 * (and the clone vnode released) if the driver open fails.  The
 * driver's d_open entry point is invoked with the vnode unlocked.
 */
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	/*
	 * Attempt cloning only for opens that come through a struct file
	 * (ap->a_fp != NULL) on a known devfs node.
	 */
	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name, node->d_dir.d_namlen,
						ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
					DEVFS_MNTDATA(vp->v_mount)->root_node,
					ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				/* switch the open over to the cloned device */
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
						"parent here is: %s, node is: |%s|\n",
						((node->parent->node_type == Proot) ?
						"ROOT!" : node->parent->d_dir.d_name),
						newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
						"test: %s\n",
						((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	/* call the driver open with the vnode unlocked */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}


	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				/* give the tty a usable default stop routine */
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		/* set up VM object backing for disk vnodes */
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * NOTE(review): when we cloned, vp is the clone vnode (still
	 * locked from devfs_allocv) and is unlocked here -- confirm the
	 * lock state expected by the caller for ap->a_vp.
	 */
	if (orig_vp)
		vn_unlock(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}
86921864bc5SMatthew Dillon 
87021864bc5SMatthew Dillon 
/*
 * devfs_spec_close: VOP_CLOSE for devfs-backed special files.
 *
 * Decides whether to actually invoke the driver's d_close based on
 * forced reclaim, D_TRACKCLOSE, or this being the last open on the
 * vnode; also performs the controlling-terminal half-close and the
 * pty visibility magic.
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s! \n",
		    dev->si_name);

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}
95321864bc5SMatthew Dillon 
95421864bc5SMatthew Dillon 
95521864bc5SMatthew Dillon static int
95621864bc5SMatthew Dillon devfs_specf_close(struct file *fp)
95721864bc5SMatthew Dillon {
95821864bc5SMatthew Dillon 	struct vnode *vp = (struct vnode *)fp->f_data;
959898c91eeSMatthew Dillon 	int error;
96021864bc5SMatthew Dillon 
96121864bc5SMatthew Dillon 	get_mplock();
96221864bc5SMatthew Dillon 	fp->f_ops = &badfileops;
96321864bc5SMatthew Dillon 	error = vn_close(vp, fp->f_flag);
96421864bc5SMatthew Dillon 	rel_mplock();
96521864bc5SMatthew Dillon 
96621864bc5SMatthew Dillon 	return (error);
96721864bc5SMatthew Dillon }
96821864bc5SMatthew Dillon 
96921864bc5SMatthew Dillon 
97021864bc5SMatthew Dillon /*
97121864bc5SMatthew Dillon  * Device-optimized file table vnode read routine.
97221864bc5SMatthew Dillon  *
97321864bc5SMatthew Dillon  * This bypasses the VOP table and talks directly to the device.  Most
97421864bc5SMatthew Dillon  * filesystems just route to specfs and can make this optimization.
97521864bc5SMatthew Dillon  *
97621864bc5SMatthew Dillon  * MPALMOSTSAFE - acquires mplock
97721864bc5SMatthew Dillon  */
97821864bc5SMatthew Dillon static int
979898c91eeSMatthew Dillon devfs_specf_read(struct file *fp, struct uio *uio,
980898c91eeSMatthew Dillon 		 struct ucred *cred, int flags)
98121864bc5SMatthew Dillon {
982898c91eeSMatthew Dillon 	struct devfs_node *node;
98321864bc5SMatthew Dillon 	struct vnode *vp;
98421864bc5SMatthew Dillon 	int ioflag;
98521864bc5SMatthew Dillon 	int error;
98621864bc5SMatthew Dillon 	cdev_t dev;
98721864bc5SMatthew Dillon 
98821864bc5SMatthew Dillon 	get_mplock();
98921864bc5SMatthew Dillon 	KASSERT(uio->uio_td == curthread,
99021864bc5SMatthew Dillon 		("uio_td %p is not td %p", uio->uio_td, curthread));
99121864bc5SMatthew Dillon 
99221864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
99321864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
99421864bc5SMatthew Dillon 		error = EBADF;
99521864bc5SMatthew Dillon 		goto done;
99621864bc5SMatthew Dillon 	}
997898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
99821864bc5SMatthew Dillon 
99921864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
100021864bc5SMatthew Dillon 		error = EBADF;
100121864bc5SMatthew Dillon 		goto done;
100221864bc5SMatthew Dillon 	}
100321864bc5SMatthew Dillon 
100421864bc5SMatthew Dillon 	reference_dev(dev);
100521864bc5SMatthew Dillon 
100621864bc5SMatthew Dillon 	if (uio->uio_resid == 0) {
100721864bc5SMatthew Dillon 		error = 0;
100821864bc5SMatthew Dillon 		goto done;
100921864bc5SMatthew Dillon 	}
101021864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
101121864bc5SMatthew Dillon 		uio->uio_offset = fp->f_offset;
101221864bc5SMatthew Dillon 
101321864bc5SMatthew Dillon 	ioflag = 0;
101421864bc5SMatthew Dillon 	if (flags & O_FBLOCKING) {
101521864bc5SMatthew Dillon 		/* ioflag &= ~IO_NDELAY; */
101621864bc5SMatthew Dillon 	} else if (flags & O_FNONBLOCKING) {
101721864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
101821864bc5SMatthew Dillon 	} else if (fp->f_flag & FNONBLOCK) {
101921864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
102021864bc5SMatthew Dillon 	}
102121864bc5SMatthew Dillon 	if (flags & O_FBUFFERED) {
102221864bc5SMatthew Dillon 		/* ioflag &= ~IO_DIRECT; */
102321864bc5SMatthew Dillon 	} else if (flags & O_FUNBUFFERED) {
102421864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
102521864bc5SMatthew Dillon 	} else if (fp->f_flag & O_DIRECT) {
102621864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
102721864bc5SMatthew Dillon 	}
102821864bc5SMatthew Dillon 	ioflag |= sequential_heuristic(uio, fp);
102921864bc5SMatthew Dillon 
103021864bc5SMatthew Dillon 	error = dev_dread(dev, uio, ioflag);
103121864bc5SMatthew Dillon 
103221864bc5SMatthew Dillon 	release_dev(dev);
1033898c91eeSMatthew Dillon 	if (node)
1034898c91eeSMatthew Dillon 		nanotime(&node->atime);
103521864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
103621864bc5SMatthew Dillon 		fp->f_offset = uio->uio_offset;
103721864bc5SMatthew Dillon 	fp->f_nextoff = uio->uio_offset;
103821864bc5SMatthew Dillon done:
103921864bc5SMatthew Dillon 	rel_mplock();
104021864bc5SMatthew Dillon 	return (error);
104121864bc5SMatthew Dillon }
104221864bc5SMatthew Dillon 
104321864bc5SMatthew Dillon 
104421864bc5SMatthew Dillon static int
1045898c91eeSMatthew Dillon devfs_specf_write(struct file *fp, struct uio *uio,
1046898c91eeSMatthew Dillon 		  struct ucred *cred, int flags)
104721864bc5SMatthew Dillon {
1048898c91eeSMatthew Dillon 	struct devfs_node *node;
104921864bc5SMatthew Dillon 	struct vnode *vp;
105021864bc5SMatthew Dillon 	int ioflag;
105121864bc5SMatthew Dillon 	int error;
105221864bc5SMatthew Dillon 	cdev_t dev;
105321864bc5SMatthew Dillon 
105421864bc5SMatthew Dillon 	get_mplock();
105521864bc5SMatthew Dillon 	KASSERT(uio->uio_td == curthread,
105621864bc5SMatthew Dillon 		("uio_td %p is not p %p", uio->uio_td, curthread));
105721864bc5SMatthew Dillon 
105821864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
105921864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
106021864bc5SMatthew Dillon 		error = EBADF;
106121864bc5SMatthew Dillon 		goto done;
106221864bc5SMatthew Dillon 	}
1063898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
106421864bc5SMatthew Dillon 	if (vp->v_type == VREG)
106521864bc5SMatthew Dillon 		bwillwrite(uio->uio_resid);
106621864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
106721864bc5SMatthew Dillon 
106821864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
106921864bc5SMatthew Dillon 		error = EBADF;
107021864bc5SMatthew Dillon 		goto done;
107121864bc5SMatthew Dillon 	}
107221864bc5SMatthew Dillon 	reference_dev(dev);
107321864bc5SMatthew Dillon 
107421864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
107521864bc5SMatthew Dillon 		uio->uio_offset = fp->f_offset;
107621864bc5SMatthew Dillon 
107721864bc5SMatthew Dillon 	ioflag = IO_UNIT;
107821864bc5SMatthew Dillon 	if (vp->v_type == VREG &&
107921864bc5SMatthew Dillon 	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
108021864bc5SMatthew Dillon 		ioflag |= IO_APPEND;
108121864bc5SMatthew Dillon 	}
108221864bc5SMatthew Dillon 
108321864bc5SMatthew Dillon 	if (flags & O_FBLOCKING) {
108421864bc5SMatthew Dillon 		/* ioflag &= ~IO_NDELAY; */
108521864bc5SMatthew Dillon 	} else if (flags & O_FNONBLOCKING) {
108621864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
108721864bc5SMatthew Dillon 	} else if (fp->f_flag & FNONBLOCK) {
108821864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
108921864bc5SMatthew Dillon 	}
109021864bc5SMatthew Dillon 	if (flags & O_FBUFFERED) {
109121864bc5SMatthew Dillon 		/* ioflag &= ~IO_DIRECT; */
109221864bc5SMatthew Dillon 	} else if (flags & O_FUNBUFFERED) {
109321864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
109421864bc5SMatthew Dillon 	} else if (fp->f_flag & O_DIRECT) {
109521864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
109621864bc5SMatthew Dillon 	}
109721864bc5SMatthew Dillon 	if (flags & O_FASYNCWRITE) {
109821864bc5SMatthew Dillon 		/* ioflag &= ~IO_SYNC; */
109921864bc5SMatthew Dillon 	} else if (flags & O_FSYNCWRITE) {
110021864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
110121864bc5SMatthew Dillon 	} else if (fp->f_flag & O_FSYNC) {
110221864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
110321864bc5SMatthew Dillon 	}
110421864bc5SMatthew Dillon 
110521864bc5SMatthew Dillon 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
110621864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
110721864bc5SMatthew Dillon 	ioflag |= sequential_heuristic(uio, fp);
110821864bc5SMatthew Dillon 
110921864bc5SMatthew Dillon 	error = dev_dwrite(dev, uio, ioflag);
111021864bc5SMatthew Dillon 
111121864bc5SMatthew Dillon 	release_dev(dev);
111207dfa375SAlex Hornung 	if (node) {
111307dfa375SAlex Hornung 		nanotime(&node->atime);
1114898c91eeSMatthew Dillon 		nanotime(&node->mtime);
111507dfa375SAlex Hornung 	}
111621864bc5SMatthew Dillon 
111721864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
111821864bc5SMatthew Dillon 		fp->f_offset = uio->uio_offset;
111921864bc5SMatthew Dillon 	fp->f_nextoff = uio->uio_offset;
112021864bc5SMatthew Dillon done:
112121864bc5SMatthew Dillon 	rel_mplock();
112221864bc5SMatthew Dillon 	return (error);
112321864bc5SMatthew Dillon }
112421864bc5SMatthew Dillon 
112521864bc5SMatthew Dillon 
112621864bc5SMatthew Dillon static int
112721864bc5SMatthew Dillon devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
112821864bc5SMatthew Dillon {
112921864bc5SMatthew Dillon 	struct vnode *vp;
113021864bc5SMatthew Dillon 	int error;
113121864bc5SMatthew Dillon 
113221864bc5SMatthew Dillon 	get_mplock();
113321864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
113421864bc5SMatthew Dillon 	error = vn_stat(vp, sb, cred);
113521864bc5SMatthew Dillon 	if (error) {
113621864bc5SMatthew Dillon 		rel_mplock();
113721864bc5SMatthew Dillon 		return (error);
113821864bc5SMatthew Dillon 	}
113921864bc5SMatthew Dillon 
114021864bc5SMatthew Dillon 	struct vattr vattr;
114121864bc5SMatthew Dillon 	struct vattr *vap;
114221864bc5SMatthew Dillon 	u_short mode;
114321864bc5SMatthew Dillon 	cdev_t dev;
114421864bc5SMatthew Dillon 
114521864bc5SMatthew Dillon 	vap = &vattr;
114621864bc5SMatthew Dillon 	error = VOP_GETATTR(vp, vap);
114721864bc5SMatthew Dillon 	if (error) {
114821864bc5SMatthew Dillon 		rel_mplock();
114921864bc5SMatthew Dillon 		return (error);
115021864bc5SMatthew Dillon 	}
115121864bc5SMatthew Dillon 
115221864bc5SMatthew Dillon 	/*
115321864bc5SMatthew Dillon 	 * Zero the spare stat fields
115421864bc5SMatthew Dillon 	 */
115521864bc5SMatthew Dillon 	sb->st_lspare = 0;
115621864bc5SMatthew Dillon 	sb->st_qspare = 0;
115721864bc5SMatthew Dillon 
115821864bc5SMatthew Dillon 	/*
115921864bc5SMatthew Dillon 	 * Copy from vattr table ... or not in case it's a cloned device
116021864bc5SMatthew Dillon 	 */
116121864bc5SMatthew Dillon 	if (vap->va_fsid != VNOVAL)
116221864bc5SMatthew Dillon 		sb->st_dev = vap->va_fsid;
116321864bc5SMatthew Dillon 	else
116421864bc5SMatthew Dillon 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
116521864bc5SMatthew Dillon 
116621864bc5SMatthew Dillon 	sb->st_ino = vap->va_fileid;
116721864bc5SMatthew Dillon 
116821864bc5SMatthew Dillon 	mode = vap->va_mode;
116921864bc5SMatthew Dillon 	mode |= S_IFCHR;
117021864bc5SMatthew Dillon 	sb->st_mode = mode;
117121864bc5SMatthew Dillon 
117221864bc5SMatthew Dillon 	if (vap->va_nlink > (nlink_t)-1)
117321864bc5SMatthew Dillon 		sb->st_nlink = (nlink_t)-1;
117421864bc5SMatthew Dillon 	else
117521864bc5SMatthew Dillon 		sb->st_nlink = vap->va_nlink;
117621864bc5SMatthew Dillon 	sb->st_uid = vap->va_uid;
117721864bc5SMatthew Dillon 	sb->st_gid = vap->va_gid;
1178ca8d7677SMatthew Dillon 	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
117921864bc5SMatthew Dillon 	sb->st_size = vap->va_size;
118021864bc5SMatthew Dillon 	sb->st_atimespec = vap->va_atime;
118121864bc5SMatthew Dillon 	sb->st_mtimespec = vap->va_mtime;
118221864bc5SMatthew Dillon 	sb->st_ctimespec = vap->va_ctime;
118321864bc5SMatthew Dillon 
118421864bc5SMatthew Dillon 	/*
118521864bc5SMatthew Dillon 	 * A VCHR and VBLK device may track the last access and last modified
118621864bc5SMatthew Dillon 	 * time independantly of the filesystem.  This is particularly true
118721864bc5SMatthew Dillon 	 * because device read and write calls may bypass the filesystem.
118821864bc5SMatthew Dillon 	 */
118921864bc5SMatthew Dillon 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
119021864bc5SMatthew Dillon 		dev = vp->v_rdev;
119121864bc5SMatthew Dillon 		if (dev != NULL) {
119221864bc5SMatthew Dillon 			if (dev->si_lastread) {
119321864bc5SMatthew Dillon 				sb->st_atimespec.tv_sec = dev->si_lastread;
119421864bc5SMatthew Dillon 				sb->st_atimespec.tv_nsec = 0;
119521864bc5SMatthew Dillon 			}
119621864bc5SMatthew Dillon 			if (dev->si_lastwrite) {
119721864bc5SMatthew Dillon 				sb->st_atimespec.tv_sec = dev->si_lastwrite;
119821864bc5SMatthew Dillon 				sb->st_atimespec.tv_nsec = 0;
119921864bc5SMatthew Dillon 			}
120021864bc5SMatthew Dillon 		}
120121864bc5SMatthew Dillon 	}
120221864bc5SMatthew Dillon 
120321864bc5SMatthew Dillon         /*
120421864bc5SMatthew Dillon 	 * According to www.opengroup.org, the meaning of st_blksize is
120521864bc5SMatthew Dillon 	 *   "a filesystem-specific preferred I/O block size for this
120621864bc5SMatthew Dillon 	 *    object.  In some filesystem types, this may vary from file
120721864bc5SMatthew Dillon 	 *    to file"
120821864bc5SMatthew Dillon 	 * Default to PAGE_SIZE after much discussion.
120921864bc5SMatthew Dillon 	 */
121021864bc5SMatthew Dillon 
121121864bc5SMatthew Dillon 	sb->st_blksize = PAGE_SIZE;
121221864bc5SMatthew Dillon 
121321864bc5SMatthew Dillon 	sb->st_flags = vap->va_flags;
121421864bc5SMatthew Dillon 
121521864bc5SMatthew Dillon 	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
121621864bc5SMatthew Dillon 	if (error)
121721864bc5SMatthew Dillon 		sb->st_gen = 0;
121821864bc5SMatthew Dillon 	else
121921864bc5SMatthew Dillon 		sb->st_gen = (u_int32_t)vap->va_gen;
122021864bc5SMatthew Dillon 
122121864bc5SMatthew Dillon 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
122221864bc5SMatthew Dillon 	sb->st_fsmid = vap->va_fsmid;
122321864bc5SMatthew Dillon 
122421864bc5SMatthew Dillon 	rel_mplock();
122521864bc5SMatthew Dillon 	return (0);
122621864bc5SMatthew Dillon }
122721864bc5SMatthew Dillon 
122821864bc5SMatthew Dillon 
122921864bc5SMatthew Dillon static int
123021864bc5SMatthew Dillon devfs_specf_kqfilter(struct file *fp, struct knote *kn)
123121864bc5SMatthew Dillon {
1232898c91eeSMatthew Dillon 	struct devfs_node *node;
123321864bc5SMatthew Dillon 	struct vnode *vp;
123421864bc5SMatthew Dillon 	int error;
123521864bc5SMatthew Dillon 	cdev_t dev;
123621864bc5SMatthew Dillon 
123721864bc5SMatthew Dillon 	get_mplock();
123821864bc5SMatthew Dillon 
123921864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
124021864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
124121864bc5SMatthew Dillon 		error = EBADF;
124221864bc5SMatthew Dillon 		goto done;
124321864bc5SMatthew Dillon 	}
1244898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
124521864bc5SMatthew Dillon 
124621864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
124721864bc5SMatthew Dillon 		error = EBADF;
124821864bc5SMatthew Dillon 		goto done;
124921864bc5SMatthew Dillon 	}
125021864bc5SMatthew Dillon 	reference_dev(dev);
125121864bc5SMatthew Dillon 
125221864bc5SMatthew Dillon 	error = dev_dkqfilter(dev, kn);
125321864bc5SMatthew Dillon 
125421864bc5SMatthew Dillon 	release_dev(dev);
125521864bc5SMatthew Dillon 
125621864bc5SMatthew Dillon done:
125721864bc5SMatthew Dillon 	rel_mplock();
125821864bc5SMatthew Dillon 	return (error);
125921864bc5SMatthew Dillon }
126021864bc5SMatthew Dillon 
126121864bc5SMatthew Dillon 
126221864bc5SMatthew Dillon static int
126321864bc5SMatthew Dillon devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
126421864bc5SMatthew Dillon {
1265898c91eeSMatthew Dillon 	struct devfs_node *node;
126621864bc5SMatthew Dillon 	struct vnode *vp;
126721864bc5SMatthew Dillon 	int error;
126821864bc5SMatthew Dillon 	cdev_t dev;
126921864bc5SMatthew Dillon 
127021864bc5SMatthew Dillon 	get_mplock();
127121864bc5SMatthew Dillon 
127221864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
127321864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
127421864bc5SMatthew Dillon 		error = EBADF;
127521864bc5SMatthew Dillon 		goto done;
127621864bc5SMatthew Dillon 	}
1277898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
127821864bc5SMatthew Dillon 
127921864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
128021864bc5SMatthew Dillon 		error = EBADF;
128121864bc5SMatthew Dillon 		goto done;
128221864bc5SMatthew Dillon 	}
128321864bc5SMatthew Dillon 	reference_dev(dev);
128421864bc5SMatthew Dillon 	error = dev_dpoll(dev, events);
128521864bc5SMatthew Dillon 
128621864bc5SMatthew Dillon 	release_dev(dev);
128721864bc5SMatthew Dillon 
128807dfa375SAlex Hornung #if 0
1289898c91eeSMatthew Dillon 	if (node)
1290898c91eeSMatthew Dillon 		nanotime(&node->atime);
129107dfa375SAlex Hornung #endif
129221864bc5SMatthew Dillon done:
129321864bc5SMatthew Dillon 	rel_mplock();
129421864bc5SMatthew Dillon 	return (error);
129521864bc5SMatthew Dillon }
129621864bc5SMatthew Dillon 
129721864bc5SMatthew Dillon 
129821864bc5SMatthew Dillon /*
129921864bc5SMatthew Dillon  * MPALMOSTSAFE - acquires mplock
130021864bc5SMatthew Dillon  */
130121864bc5SMatthew Dillon static int
130287baaf0cSMatthew Dillon devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data,
130387baaf0cSMatthew Dillon 		  struct ucred *ucred, struct sysmsg *msg)
130421864bc5SMatthew Dillon {
1305898c91eeSMatthew Dillon 	struct devfs_node *node;
1306898c91eeSMatthew Dillon 	struct vnode *vp;
130721864bc5SMatthew Dillon 	struct vnode *ovp;
130821864bc5SMatthew Dillon 	cdev_t	dev;
130921864bc5SMatthew Dillon 	int error;
131021864bc5SMatthew Dillon 	struct fiodname_args *name_args;
131121864bc5SMatthew Dillon 	size_t namlen;
131221864bc5SMatthew Dillon 	const char *name;
131321864bc5SMatthew Dillon 
131421864bc5SMatthew Dillon 	get_mplock();
1315898c91eeSMatthew Dillon 	vp = ((struct vnode *)fp->f_data);
131621864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
131721864bc5SMatthew Dillon 		error = EBADF;		/* device was revoked */
131821864bc5SMatthew Dillon 		goto out;
131921864bc5SMatthew Dillon 	}
132021864bc5SMatthew Dillon 
1321898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
1322898c91eeSMatthew Dillon 
1323898c91eeSMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG,
1324898c91eeSMatthew Dillon 		    "devfs_specf_ioctl() called! for dev %s\n",
1325898c91eeSMatthew Dillon 		    dev->si_name);
132621864bc5SMatthew Dillon 
132721864bc5SMatthew Dillon 	if (com == FIODTYPE) {
132821864bc5SMatthew Dillon 		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
132921864bc5SMatthew Dillon 		error = 0;
133021864bc5SMatthew Dillon 		goto out;
133121864bc5SMatthew Dillon 	} else if (com == FIODNAME) {
133221864bc5SMatthew Dillon 		name_args = (struct fiodname_args *)data;
133321864bc5SMatthew Dillon 		name = dev->si_name;
133421864bc5SMatthew Dillon 		namlen = strlen(name) + 1;
133521864bc5SMatthew Dillon 
1336898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1337898c91eeSMatthew Dillon 			    "ioctl, got: FIODNAME for %s\n", name);
133821864bc5SMatthew Dillon 
133921864bc5SMatthew Dillon 		if (namlen <= name_args->len)
134021864bc5SMatthew Dillon 			error = copyout(dev->si_name, name_args->name, namlen);
134121864bc5SMatthew Dillon 		else
134221864bc5SMatthew Dillon 			error = EINVAL;
134321864bc5SMatthew Dillon 
1344898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1345898c91eeSMatthew Dillon 			    "ioctl stuff: error: %d\n", error);
134621864bc5SMatthew Dillon 		goto out;
134721864bc5SMatthew Dillon 	}
134821864bc5SMatthew Dillon 	reference_dev(dev);
134987baaf0cSMatthew Dillon 	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);
135021864bc5SMatthew Dillon 	release_dev(dev);
135107dfa375SAlex Hornung #if 0
1352898c91eeSMatthew Dillon 	if (node) {
1353898c91eeSMatthew Dillon 		nanotime(&node->atime);
1354898c91eeSMatthew Dillon 		nanotime(&node->mtime);
135521864bc5SMatthew Dillon 	}
135607dfa375SAlex Hornung #endif
135721864bc5SMatthew Dillon 
1358898c91eeSMatthew Dillon 	if (com == TIOCSCTTY) {
1359898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1360898c91eeSMatthew Dillon 			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
1361898c91eeSMatthew Dillon 			    dev->si_name);
1362898c91eeSMatthew Dillon 	}
136321864bc5SMatthew Dillon 	if (error == 0 && com == TIOCSCTTY) {
136421864bc5SMatthew Dillon 		struct proc *p = curthread->td_proc;
136521864bc5SMatthew Dillon 		struct session *sess;
1366898c91eeSMatthew Dillon 
1367898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1368898c91eeSMatthew Dillon 			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
1369898c91eeSMatthew Dillon 			    dev->si_name);
137021864bc5SMatthew Dillon 		if (p == NULL) {
137121864bc5SMatthew Dillon 			error = ENOTTY;
137221864bc5SMatthew Dillon 			goto out;
137321864bc5SMatthew Dillon 		}
137421864bc5SMatthew Dillon 		sess = p->p_session;
1375898c91eeSMatthew Dillon 
1376898c91eeSMatthew Dillon 		/*
1377898c91eeSMatthew Dillon 		 * Do nothing if reassigning same control tty
1378898c91eeSMatthew Dillon 		 */
137921864bc5SMatthew Dillon 		if (sess->s_ttyvp == vp) {
138021864bc5SMatthew Dillon 			error = 0;
138121864bc5SMatthew Dillon 			goto out;
138221864bc5SMatthew Dillon 		}
1383898c91eeSMatthew Dillon 
1384898c91eeSMatthew Dillon 		/*
1385898c91eeSMatthew Dillon 		 * Get rid of reference to old control tty
1386898c91eeSMatthew Dillon 		 */
138721864bc5SMatthew Dillon 		ovp = sess->s_ttyvp;
138821864bc5SMatthew Dillon 		vref(vp);
138921864bc5SMatthew Dillon 		sess->s_ttyvp = vp;
139021864bc5SMatthew Dillon 		if (ovp)
139121864bc5SMatthew Dillon 			vrele(ovp);
139221864bc5SMatthew Dillon 	}
139321864bc5SMatthew Dillon 
139421864bc5SMatthew Dillon out:
139521864bc5SMatthew Dillon 	rel_mplock();
139621864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
139721864bc5SMatthew Dillon 	return (error);
139821864bc5SMatthew Dillon }
139921864bc5SMatthew Dillon 
140021864bc5SMatthew Dillon 
140121864bc5SMatthew Dillon static int
140221864bc5SMatthew Dillon devfs_spec_fsync(struct vop_fsync_args *ap)
140321864bc5SMatthew Dillon {
140421864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
140521864bc5SMatthew Dillon 	int error;
140621864bc5SMatthew Dillon 
140721864bc5SMatthew Dillon 	if (!vn_isdisk(vp, NULL))
140821864bc5SMatthew Dillon 		return (0);
140921864bc5SMatthew Dillon 
141021864bc5SMatthew Dillon 	/*
141121864bc5SMatthew Dillon 	 * Flush all dirty buffers associated with a block device.
141221864bc5SMatthew Dillon 	 */
141321864bc5SMatthew Dillon 	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
141421864bc5SMatthew Dillon 	return (error);
141521864bc5SMatthew Dillon }
141621864bc5SMatthew Dillon 
141721864bc5SMatthew Dillon static int
141821864bc5SMatthew Dillon devfs_spec_read(struct vop_read_args *ap)
141921864bc5SMatthew Dillon {
1420898c91eeSMatthew Dillon 	struct devfs_node *node;
142121864bc5SMatthew Dillon 	struct vnode *vp;
142221864bc5SMatthew Dillon 	struct uio *uio;
142321864bc5SMatthew Dillon 	cdev_t dev;
142421864bc5SMatthew Dillon 	int error;
142521864bc5SMatthew Dillon 
142621864bc5SMatthew Dillon 	vp = ap->a_vp;
142721864bc5SMatthew Dillon 	dev = vp->v_rdev;
142821864bc5SMatthew Dillon 	uio = ap->a_uio;
1429898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
143021864bc5SMatthew Dillon 
143121864bc5SMatthew Dillon 	if (dev == NULL)		/* device was revoked */
143221864bc5SMatthew Dillon 		return (EBADF);
143321864bc5SMatthew Dillon 	if (uio->uio_resid == 0)
143421864bc5SMatthew Dillon 		return (0);
143521864bc5SMatthew Dillon 
143621864bc5SMatthew Dillon 	vn_unlock(vp);
143721864bc5SMatthew Dillon 	error = dev_dread(dev, uio, ap->a_ioflag);
143821864bc5SMatthew Dillon 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
143921864bc5SMatthew Dillon 
1440898c91eeSMatthew Dillon 	if (node)
1441898c91eeSMatthew Dillon 		nanotime(&node->atime);
144221864bc5SMatthew Dillon 
144321864bc5SMatthew Dillon 	return (error);
144421864bc5SMatthew Dillon }
144521864bc5SMatthew Dillon 
144621864bc5SMatthew Dillon /*
144721864bc5SMatthew Dillon  * Vnode op for write
144821864bc5SMatthew Dillon  *
144921864bc5SMatthew Dillon  * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
145021864bc5SMatthew Dillon  *	      struct ucred *a_cred)
145121864bc5SMatthew Dillon  */
145221864bc5SMatthew Dillon static int
145321864bc5SMatthew Dillon devfs_spec_write(struct vop_write_args *ap)
145421864bc5SMatthew Dillon {
1455898c91eeSMatthew Dillon 	struct devfs_node *node;
145621864bc5SMatthew Dillon 	struct vnode *vp;
145721864bc5SMatthew Dillon 	struct uio *uio;
145821864bc5SMatthew Dillon 	cdev_t dev;
145921864bc5SMatthew Dillon 	int error;
146021864bc5SMatthew Dillon 
146121864bc5SMatthew Dillon 	vp = ap->a_vp;
146221864bc5SMatthew Dillon 	dev = vp->v_rdev;
146321864bc5SMatthew Dillon 	uio = ap->a_uio;
1464898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
146521864bc5SMatthew Dillon 
146621864bc5SMatthew Dillon 	KKASSERT(uio->uio_segflg != UIO_NOCOPY);
146721864bc5SMatthew Dillon 
146821864bc5SMatthew Dillon 	if (dev == NULL)		/* device was revoked */
146921864bc5SMatthew Dillon 		return (EBADF);
147021864bc5SMatthew Dillon 
147121864bc5SMatthew Dillon 	vn_unlock(vp);
147221864bc5SMatthew Dillon 	error = dev_dwrite(dev, uio, ap->a_ioflag);
147321864bc5SMatthew Dillon 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
147421864bc5SMatthew Dillon 
147507dfa375SAlex Hornung 	if (node) {
147607dfa375SAlex Hornung 		nanotime(&node->atime);
1477898c91eeSMatthew Dillon 		nanotime(&node->mtime);
147807dfa375SAlex Hornung 	}
147921864bc5SMatthew Dillon 
148021864bc5SMatthew Dillon 	return (error);
148121864bc5SMatthew Dillon }
148221864bc5SMatthew Dillon 
148321864bc5SMatthew Dillon /*
148421864bc5SMatthew Dillon  * Device ioctl operation.
148521864bc5SMatthew Dillon  *
148621864bc5SMatthew Dillon  * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
148787baaf0cSMatthew Dillon  *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
148821864bc5SMatthew Dillon  */
148921864bc5SMatthew Dillon static int
149021864bc5SMatthew Dillon devfs_spec_ioctl(struct vop_ioctl_args *ap)
149121864bc5SMatthew Dillon {
149221864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1493898c91eeSMatthew Dillon 	struct devfs_node *node;
1494898c91eeSMatthew Dillon 	cdev_t dev;
149521864bc5SMatthew Dillon 
149621864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
149721864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1498898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
149921864bc5SMatthew Dillon 
150007dfa375SAlex Hornung #if 0
1501898c91eeSMatthew Dillon 	if (node) {
1502898c91eeSMatthew Dillon 		nanotime(&node->atime);
1503898c91eeSMatthew Dillon 		nanotime(&node->mtime);
150421864bc5SMatthew Dillon 	}
150507dfa375SAlex Hornung #endif
150621864bc5SMatthew Dillon 
150787baaf0cSMatthew Dillon 	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
150887baaf0cSMatthew Dillon 			   ap->a_cred, ap->a_sysmsg));
150921864bc5SMatthew Dillon }
151021864bc5SMatthew Dillon 
151121864bc5SMatthew Dillon /*
151221864bc5SMatthew Dillon  * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
151321864bc5SMatthew Dillon  */
151421864bc5SMatthew Dillon /* ARGSUSED */
151521864bc5SMatthew Dillon static int
151621864bc5SMatthew Dillon devfs_spec_poll(struct vop_poll_args *ap)
151721864bc5SMatthew Dillon {
151821864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1519898c91eeSMatthew Dillon 	struct devfs_node *node;
1520898c91eeSMatthew Dillon 	cdev_t dev;
152121864bc5SMatthew Dillon 
152221864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
152321864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1524898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
152521864bc5SMatthew Dillon 
152607dfa375SAlex Hornung #if 0
1527898c91eeSMatthew Dillon 	if (node)
1528898c91eeSMatthew Dillon 		nanotime(&node->atime);
152907dfa375SAlex Hornung #endif
153021864bc5SMatthew Dillon 
153121864bc5SMatthew Dillon 	return (dev_dpoll(dev, ap->a_events));
153221864bc5SMatthew Dillon }
153321864bc5SMatthew Dillon 
153421864bc5SMatthew Dillon /*
153521864bc5SMatthew Dillon  * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
153621864bc5SMatthew Dillon  */
153721864bc5SMatthew Dillon /* ARGSUSED */
153821864bc5SMatthew Dillon static int
153921864bc5SMatthew Dillon devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
154021864bc5SMatthew Dillon {
154121864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1542898c91eeSMatthew Dillon 	struct devfs_node *node;
1543898c91eeSMatthew Dillon 	cdev_t dev;
154421864bc5SMatthew Dillon 
154521864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
154621864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1547898c91eeSMatthew Dillon 	node = DEVFS_NODE(vp);
154821864bc5SMatthew Dillon 
154907dfa375SAlex Hornung #if 0
1550898c91eeSMatthew Dillon 	if (node)
1551898c91eeSMatthew Dillon 		nanotime(&node->atime);
155207dfa375SAlex Hornung #endif
155321864bc5SMatthew Dillon 
155421864bc5SMatthew Dillon 	return (dev_dkqfilter(dev, ap->a_kn));
155521864bc5SMatthew Dillon }
155621864bc5SMatthew Dillon 
155721864bc5SMatthew Dillon /*
155821864bc5SMatthew Dillon  * Convert a vnode strategy call into a device strategy call.  Vnode strategy
155921864bc5SMatthew Dillon  * calls are not limited to device DMA limits so we have to deal with the
156021864bc5SMatthew Dillon  * case.
156121864bc5SMatthew Dillon  *
156221864bc5SMatthew Dillon  * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
156321864bc5SMatthew Dillon  */
156421864bc5SMatthew Dillon static int
156521864bc5SMatthew Dillon devfs_spec_strategy(struct vop_strategy_args *ap)
156621864bc5SMatthew Dillon {
156721864bc5SMatthew Dillon 	struct bio *bio = ap->a_bio;
156821864bc5SMatthew Dillon 	struct buf *bp = bio->bio_buf;
156921864bc5SMatthew Dillon 	struct buf *nbp;
157021864bc5SMatthew Dillon 	struct vnode *vp;
157121864bc5SMatthew Dillon 	struct mount *mp;
157221864bc5SMatthew Dillon 	int chunksize;
157321864bc5SMatthew Dillon 	int maxiosize;
157421864bc5SMatthew Dillon 
157521864bc5SMatthew Dillon 	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
157621864bc5SMatthew Dillon 		buf_start(bp);
157721864bc5SMatthew Dillon 
157821864bc5SMatthew Dillon 	/*
157921864bc5SMatthew Dillon 	 * Collect statistics on synchronous and asynchronous read
158021864bc5SMatthew Dillon 	 * and write counts for disks that have associated filesystems.
158121864bc5SMatthew Dillon 	 */
158221864bc5SMatthew Dillon 	vp = ap->a_vp;
158321864bc5SMatthew Dillon 	KKASSERT(vp->v_rdev != NULL);	/* XXX */
158421864bc5SMatthew Dillon 	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
158521864bc5SMatthew Dillon 		if (bp->b_cmd == BUF_CMD_READ) {
158621864bc5SMatthew Dillon 			if (bp->b_flags & BIO_SYNC)
158721864bc5SMatthew Dillon 				mp->mnt_stat.f_syncreads++;
158821864bc5SMatthew Dillon 			else
158921864bc5SMatthew Dillon 				mp->mnt_stat.f_asyncreads++;
159021864bc5SMatthew Dillon 		} else {
159121864bc5SMatthew Dillon 			if (bp->b_flags & BIO_SYNC)
159221864bc5SMatthew Dillon 				mp->mnt_stat.f_syncwrites++;
159321864bc5SMatthew Dillon 			else
159421864bc5SMatthew Dillon 				mp->mnt_stat.f_asyncwrites++;
159521864bc5SMatthew Dillon 		}
159621864bc5SMatthew Dillon 	}
159721864bc5SMatthew Dillon 
159821864bc5SMatthew Dillon         /*
159921864bc5SMatthew Dillon          * Device iosize limitations only apply to read and write.  Shortcut
160021864bc5SMatthew Dillon          * the I/O if it fits.
160121864bc5SMatthew Dillon          */
160221864bc5SMatthew Dillon 	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
1603898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1604898c91eeSMatthew Dillon 			    "%s: si_iosize_max not set!\n",
1605898c91eeSMatthew Dillon 			    dev_dname(vp->v_rdev));
160621864bc5SMatthew Dillon 		maxiosize = MAXPHYS;
160721864bc5SMatthew Dillon 	}
160821864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 2
160921864bc5SMatthew Dillon 	maxiosize = 4096;
161021864bc5SMatthew Dillon #endif
161121864bc5SMatthew Dillon         if (bp->b_bcount <= maxiosize ||
161221864bc5SMatthew Dillon             (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
161321864bc5SMatthew Dillon                 dev_dstrategy_chain(vp->v_rdev, bio);
161421864bc5SMatthew Dillon                 return (0);
161521864bc5SMatthew Dillon         }
161621864bc5SMatthew Dillon 
161721864bc5SMatthew Dillon 	/*
161821864bc5SMatthew Dillon 	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
161921864bc5SMatthew Dillon 	 */
162021864bc5SMatthew Dillon 	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
162121864bc5SMatthew Dillon 	initbufbio(nbp);
162221864bc5SMatthew Dillon 	buf_dep_init(nbp);
162321864bc5SMatthew Dillon 	BUF_LOCKINIT(nbp);
162421864bc5SMatthew Dillon 	BUF_LOCK(nbp, LK_EXCLUSIVE);
162521864bc5SMatthew Dillon 	BUF_KERNPROC(nbp);
162621864bc5SMatthew Dillon 	nbp->b_vp = vp;
162721864bc5SMatthew Dillon 	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
162821864bc5SMatthew Dillon 	nbp->b_data = bp->b_data;
162921864bc5SMatthew Dillon 	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
163021864bc5SMatthew Dillon 	nbp->b_bio1.bio_offset = bio->bio_offset;
163121864bc5SMatthew Dillon 	nbp->b_bio1.bio_caller_info1.ptr = bio;
163221864bc5SMatthew Dillon 
163321864bc5SMatthew Dillon 	/*
163421864bc5SMatthew Dillon 	 * Start the first transfer
163521864bc5SMatthew Dillon 	 */
163621864bc5SMatthew Dillon 	if (vn_isdisk(vp, NULL))
163721864bc5SMatthew Dillon 		chunksize = vp->v_rdev->si_bsize_phys;
163821864bc5SMatthew Dillon 	else
163921864bc5SMatthew Dillon 		chunksize = DEV_BSIZE;
164021864bc5SMatthew Dillon 	chunksize = maxiosize / chunksize * chunksize;
164121864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1642898c91eeSMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG,
1643898c91eeSMatthew Dillon 		    "spec_strategy chained I/O chunksize=%d\n",
1644898c91eeSMatthew Dillon 		    chunksize);
164521864bc5SMatthew Dillon #endif
164621864bc5SMatthew Dillon 	nbp->b_cmd = bp->b_cmd;
164721864bc5SMatthew Dillon 	nbp->b_bcount = chunksize;
164821864bc5SMatthew Dillon 	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
164921864bc5SMatthew Dillon 	nbp->b_bio1.bio_caller_info2.index = chunksize;
165021864bc5SMatthew Dillon 
165121864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1652898c91eeSMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG,
1653898c91eeSMatthew Dillon 		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
165421864bc5SMatthew Dillon 		    bp, 0, bp->b_bcount, nbp->b_bcount);
165521864bc5SMatthew Dillon #endif
165621864bc5SMatthew Dillon 
165721864bc5SMatthew Dillon 	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);
165821864bc5SMatthew Dillon 
165921864bc5SMatthew Dillon 	if (DEVFS_NODE(vp)) {
166021864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
166121864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->mtime);
166221864bc5SMatthew Dillon 	}
166321864bc5SMatthew Dillon 
166421864bc5SMatthew Dillon 	return (0);
166521864bc5SMatthew Dillon }
166621864bc5SMatthew Dillon 
166721864bc5SMatthew Dillon /*
166821864bc5SMatthew Dillon  * Chunked up transfer completion routine - chain transfers until done
166921864bc5SMatthew Dillon  */
167021864bc5SMatthew Dillon static
167121864bc5SMatthew Dillon void
167221864bc5SMatthew Dillon devfs_spec_strategy_done(struct bio *nbio)
167321864bc5SMatthew Dillon {
167421864bc5SMatthew Dillon 	struct buf *nbp = nbio->bio_buf;
167521864bc5SMatthew Dillon 	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
167621864bc5SMatthew Dillon 	struct buf *bp = bio->bio_buf;			/* original bp */
167721864bc5SMatthew Dillon 	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
167821864bc5SMatthew Dillon 	int boffset = nbp->b_data - bp->b_data;
167921864bc5SMatthew Dillon 
168021864bc5SMatthew Dillon 	if (nbp->b_flags & B_ERROR) {
168121864bc5SMatthew Dillon 		/*
168221864bc5SMatthew Dillon 		 * An error terminates the chain, propogate the error back
168321864bc5SMatthew Dillon 		 * to the original bp
168421864bc5SMatthew Dillon 		 */
168521864bc5SMatthew Dillon 		bp->b_flags |= B_ERROR;
168621864bc5SMatthew Dillon 		bp->b_error = nbp->b_error;
168721864bc5SMatthew Dillon 		bp->b_resid = bp->b_bcount - boffset +
168821864bc5SMatthew Dillon 			      (nbp->b_bcount - nbp->b_resid);
168921864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1690898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1691898c91eeSMatthew Dillon 			    "spec_strategy: chain %p error %d bcount %d/%d\n",
169221864bc5SMatthew Dillon 			    bp, bp->b_error, bp->b_bcount,
169321864bc5SMatthew Dillon 			    bp->b_bcount - bp->b_resid);
169421864bc5SMatthew Dillon #endif
169521864bc5SMatthew Dillon 		kfree(nbp, M_DEVBUF);
169621864bc5SMatthew Dillon 		biodone(bio);
169721864bc5SMatthew Dillon 	} else if (nbp->b_resid) {
169821864bc5SMatthew Dillon 		/*
169921864bc5SMatthew Dillon 		 * A short read or write terminates the chain
170021864bc5SMatthew Dillon 		 */
170121864bc5SMatthew Dillon 		bp->b_error = nbp->b_error;
170221864bc5SMatthew Dillon 		bp->b_resid = bp->b_bcount - boffset +
170321864bc5SMatthew Dillon 			      (nbp->b_bcount - nbp->b_resid);
170421864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1705898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1706898c91eeSMatthew Dillon 			    "spec_strategy: chain %p short read(1) "
1707898c91eeSMatthew Dillon 			    "bcount %d/%d\n",
170821864bc5SMatthew Dillon 			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
170921864bc5SMatthew Dillon #endif
171021864bc5SMatthew Dillon 		kfree(nbp, M_DEVBUF);
171121864bc5SMatthew Dillon 		biodone(bio);
171221864bc5SMatthew Dillon 	} else if (nbp->b_bcount != nbp->b_bufsize) {
171321864bc5SMatthew Dillon 		/*
171421864bc5SMatthew Dillon 		 * A short read or write can also occur by truncating b_bcount
171521864bc5SMatthew Dillon 		 */
171621864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1717898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1718898c91eeSMatthew Dillon 			    "spec_strategy: chain %p short read(2) "
1719898c91eeSMatthew Dillon 			    "bcount %d/%d\n",
172021864bc5SMatthew Dillon 			    bp, nbp->b_bcount + boffset, bp->b_bcount);
172121864bc5SMatthew Dillon #endif
172221864bc5SMatthew Dillon 		bp->b_error = 0;
172321864bc5SMatthew Dillon 		bp->b_bcount = nbp->b_bcount + boffset;
172421864bc5SMatthew Dillon 		bp->b_resid = nbp->b_resid;
172521864bc5SMatthew Dillon 		kfree(nbp, M_DEVBUF);
172621864bc5SMatthew Dillon 		biodone(bio);
172721864bc5SMatthew Dillon 	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
172821864bc5SMatthew Dillon 		/*
172921864bc5SMatthew Dillon 		 * No more data terminates the chain
173021864bc5SMatthew Dillon 		 */
173121864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1732898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1733898c91eeSMatthew Dillon 			    "spec_strategy: chain %p finished bcount %d\n",
173421864bc5SMatthew Dillon 			    bp, bp->b_bcount);
173521864bc5SMatthew Dillon #endif
173621864bc5SMatthew Dillon 		bp->b_error = 0;
173721864bc5SMatthew Dillon 		bp->b_resid = 0;
173821864bc5SMatthew Dillon 		kfree(nbp, M_DEVBUF);
173921864bc5SMatthew Dillon 		biodone(bio);
174021864bc5SMatthew Dillon 	} else {
174121864bc5SMatthew Dillon 		/*
174221864bc5SMatthew Dillon 		 * Continue the chain
174321864bc5SMatthew Dillon 		 */
174421864bc5SMatthew Dillon 		boffset += nbp->b_bcount;
174521864bc5SMatthew Dillon 		nbp->b_data = bp->b_data + boffset;
174621864bc5SMatthew Dillon 		nbp->b_bcount = bp->b_bcount - boffset;
174721864bc5SMatthew Dillon 		if (nbp->b_bcount > chunksize)
174821864bc5SMatthew Dillon 			nbp->b_bcount = chunksize;
174921864bc5SMatthew Dillon 		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
175021864bc5SMatthew Dillon 		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;
175121864bc5SMatthew Dillon 
175221864bc5SMatthew Dillon #if SPEC_CHAIN_DEBUG & 1
1753898c91eeSMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG,
1754898c91eeSMatthew Dillon 			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
175521864bc5SMatthew Dillon 			    bp, boffset, bp->b_bcount, nbp->b_bcount);
175621864bc5SMatthew Dillon #endif
175721864bc5SMatthew Dillon 
175821864bc5SMatthew Dillon 		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
175921864bc5SMatthew Dillon 	}
176021864bc5SMatthew Dillon }
176121864bc5SMatthew Dillon 
176221864bc5SMatthew Dillon /*
176321864bc5SMatthew Dillon  * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
176421864bc5SMatthew Dillon  */
176521864bc5SMatthew Dillon static int
176621864bc5SMatthew Dillon devfs_spec_freeblks(struct vop_freeblks_args *ap)
176721864bc5SMatthew Dillon {
176821864bc5SMatthew Dillon 	struct buf *bp;
176921864bc5SMatthew Dillon 
177021864bc5SMatthew Dillon 	/*
177121864bc5SMatthew Dillon 	 * XXX: This assumes that strategy does the deed right away.
177221864bc5SMatthew Dillon 	 * XXX: this may not be TRTTD.
177321864bc5SMatthew Dillon 	 */
177421864bc5SMatthew Dillon 	KKASSERT(ap->a_vp->v_rdev != NULL);
177521864bc5SMatthew Dillon 	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
177621864bc5SMatthew Dillon 		return (0);
177721864bc5SMatthew Dillon 	bp = geteblk(ap->a_length);
177821864bc5SMatthew Dillon 	bp->b_cmd = BUF_CMD_FREEBLKS;
177921864bc5SMatthew Dillon 	bp->b_bio1.bio_offset = ap->a_offset;
178021864bc5SMatthew Dillon 	bp->b_bcount = ap->a_length;
178121864bc5SMatthew Dillon 	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
178221864bc5SMatthew Dillon 	return (0);
178321864bc5SMatthew Dillon }
178421864bc5SMatthew Dillon 
178521864bc5SMatthew Dillon /*
178621864bc5SMatthew Dillon  * Implement degenerate case where the block requested is the block
178721864bc5SMatthew Dillon  * returned, and assume that the entire device is contiguous in regards
178821864bc5SMatthew Dillon  * to the contiguous block range (runp and runb).
178921864bc5SMatthew Dillon  *
179021864bc5SMatthew Dillon  * spec_bmap(struct vnode *a_vp, off_t a_loffset,
179121864bc5SMatthew Dillon  *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
179221864bc5SMatthew Dillon  */
179321864bc5SMatthew Dillon static int
179421864bc5SMatthew Dillon devfs_spec_bmap(struct vop_bmap_args *ap)
179521864bc5SMatthew Dillon {
179621864bc5SMatthew Dillon 	if (ap->a_doffsetp != NULL)
179721864bc5SMatthew Dillon 		*ap->a_doffsetp = ap->a_loffset;
179821864bc5SMatthew Dillon 	if (ap->a_runp != NULL)
179921864bc5SMatthew Dillon 		*ap->a_runp = MAXBSIZE;
180021864bc5SMatthew Dillon 	if (ap->a_runb != NULL) {
180121864bc5SMatthew Dillon 		if (ap->a_loffset < MAXBSIZE)
180221864bc5SMatthew Dillon 			*ap->a_runb = (int)ap->a_loffset;
180321864bc5SMatthew Dillon 		else
180421864bc5SMatthew Dillon 			*ap->a_runb = MAXBSIZE;
180521864bc5SMatthew Dillon 	}
180621864bc5SMatthew Dillon 	return (0);
180721864bc5SMatthew Dillon }
180821864bc5SMatthew Dillon 
180921864bc5SMatthew Dillon 
181021864bc5SMatthew Dillon /*
181121864bc5SMatthew Dillon  * Special device advisory byte-level locks.
181221864bc5SMatthew Dillon  *
181321864bc5SMatthew Dillon  * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
181421864bc5SMatthew Dillon  *		struct flock *a_fl, int a_flags)
181521864bc5SMatthew Dillon  */
181621864bc5SMatthew Dillon /* ARGSUSED */
181721864bc5SMatthew Dillon static int
181821864bc5SMatthew Dillon devfs_spec_advlock(struct vop_advlock_args *ap)
181921864bc5SMatthew Dillon {
182021864bc5SMatthew Dillon 	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
182121864bc5SMatthew Dillon }
182221864bc5SMatthew Dillon 
182321864bc5SMatthew Dillon static void
182421864bc5SMatthew Dillon devfs_spec_getpages_iodone(struct bio *bio)
182521864bc5SMatthew Dillon {
182621864bc5SMatthew Dillon 	bio->bio_buf->b_cmd = BUF_CMD_DONE;
182721864bc5SMatthew Dillon 	wakeup(bio->bio_buf);
182821864bc5SMatthew Dillon }
182921864bc5SMatthew Dillon 
183021864bc5SMatthew Dillon /*
183121864bc5SMatthew Dillon  * spec_getpages() - get pages associated with device vnode.
183221864bc5SMatthew Dillon  *
183321864bc5SMatthew Dillon  * Note that spec_read and spec_write do not use the buffer cache, so we
183421864bc5SMatthew Dillon  * must fully implement getpages here.
183521864bc5SMatthew Dillon  */
183621864bc5SMatthew Dillon static int
183721864bc5SMatthew Dillon devfs_spec_getpages(struct vop_getpages_args *ap)
183821864bc5SMatthew Dillon {
183921864bc5SMatthew Dillon 	vm_offset_t kva;
184021864bc5SMatthew Dillon 	int error;
184121864bc5SMatthew Dillon 	int i, pcount, size;
184221864bc5SMatthew Dillon 	struct buf *bp;
184321864bc5SMatthew Dillon 	vm_page_t m;
184421864bc5SMatthew Dillon 	vm_ooffset_t offset;
184521864bc5SMatthew Dillon 	int toff, nextoff, nread;
184621864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
184721864bc5SMatthew Dillon 	int blksiz;
184821864bc5SMatthew Dillon 	int gotreqpage;
184921864bc5SMatthew Dillon 
185021864bc5SMatthew Dillon 	error = 0;
185121864bc5SMatthew Dillon 	pcount = round_page(ap->a_count) / PAGE_SIZE;
185221864bc5SMatthew Dillon 
185321864bc5SMatthew Dillon 	/*
185421864bc5SMatthew Dillon 	 * Calculate the offset of the transfer and do sanity check.
185521864bc5SMatthew Dillon 	 */
185621864bc5SMatthew Dillon 	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;
185721864bc5SMatthew Dillon 
185821864bc5SMatthew Dillon 	/*
185921864bc5SMatthew Dillon 	 * Round up physical size for real devices.  We cannot round using
186021864bc5SMatthew Dillon 	 * v_mount's block size data because v_mount has nothing to do with
186121864bc5SMatthew Dillon 	 * the device.  i.e. it's usually '/dev'.  We need the physical block
186221864bc5SMatthew Dillon 	 * size for the device itself.
186321864bc5SMatthew Dillon 	 *
186421864bc5SMatthew Dillon 	 * We can't use v_rdev->si_mountpoint because it only exists when the
186521864bc5SMatthew Dillon 	 * block device is mounted.  However, we can use v_rdev.
186621864bc5SMatthew Dillon 	 */
186721864bc5SMatthew Dillon 	if (vn_isdisk(vp, NULL))
186821864bc5SMatthew Dillon 		blksiz = vp->v_rdev->si_bsize_phys;
186921864bc5SMatthew Dillon 	else
187021864bc5SMatthew Dillon 		blksiz = DEV_BSIZE;
187121864bc5SMatthew Dillon 
187221864bc5SMatthew Dillon 	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);
187321864bc5SMatthew Dillon 
187421864bc5SMatthew Dillon 	bp = getpbuf(NULL);
187521864bc5SMatthew Dillon 	kva = (vm_offset_t)bp->b_data;
187621864bc5SMatthew Dillon 
187721864bc5SMatthew Dillon 	/*
187821864bc5SMatthew Dillon 	 * Map the pages to be read into the kva.
187921864bc5SMatthew Dillon 	 */
188021864bc5SMatthew Dillon 	pmap_qenter(kva, ap->a_m, pcount);
188121864bc5SMatthew Dillon 
188221864bc5SMatthew Dillon 	/* Build a minimal buffer header. */
188321864bc5SMatthew Dillon 	bp->b_cmd = BUF_CMD_READ;
188421864bc5SMatthew Dillon 	bp->b_bcount = size;
188521864bc5SMatthew Dillon 	bp->b_resid = 0;
188621864bc5SMatthew Dillon 	bp->b_runningbufspace = size;
188721864bc5SMatthew Dillon 	if (size) {
188821864bc5SMatthew Dillon 		runningbufspace += bp->b_runningbufspace;
188921864bc5SMatthew Dillon 		++runningbufcount;
189021864bc5SMatthew Dillon 	}
189121864bc5SMatthew Dillon 
189221864bc5SMatthew Dillon 	bp->b_bio1.bio_offset = offset;
189321864bc5SMatthew Dillon 	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;
189421864bc5SMatthew Dillon 
189521864bc5SMatthew Dillon 	mycpu->gd_cnt.v_vnodein++;
189621864bc5SMatthew Dillon 	mycpu->gd_cnt.v_vnodepgsin += pcount;
189721864bc5SMatthew Dillon 
189821864bc5SMatthew Dillon 	/* Do the input. */
189921864bc5SMatthew Dillon 	vn_strategy(ap->a_vp, &bp->b_bio1);
190021864bc5SMatthew Dillon 
190121864bc5SMatthew Dillon 	crit_enter();
190221864bc5SMatthew Dillon 
190321864bc5SMatthew Dillon 	/* We definitely need to be at splbio here. */
190421864bc5SMatthew Dillon 	while (bp->b_cmd != BUF_CMD_DONE)
190521864bc5SMatthew Dillon 		tsleep(bp, 0, "spread", 0);
190621864bc5SMatthew Dillon 
190721864bc5SMatthew Dillon 	crit_exit();
190821864bc5SMatthew Dillon 
190921864bc5SMatthew Dillon 	if (bp->b_flags & B_ERROR) {
191021864bc5SMatthew Dillon 		if (bp->b_error)
191121864bc5SMatthew Dillon 			error = bp->b_error;
191221864bc5SMatthew Dillon 		else
191321864bc5SMatthew Dillon 			error = EIO;
191421864bc5SMatthew Dillon 	}
191521864bc5SMatthew Dillon 
191621864bc5SMatthew Dillon 	/*
191721864bc5SMatthew Dillon 	 * If EOF is encountered we must zero-extend the result in order
191821864bc5SMatthew Dillon 	 * to ensure that the page does not contain garbage.  When no
191921864bc5SMatthew Dillon 	 * error occurs, an early EOF is indicated if b_bcount got truncated.
192021864bc5SMatthew Dillon 	 * b_resid is relative to b_bcount and should be 0, but some devices
192121864bc5SMatthew Dillon 	 * might indicate an EOF with b_resid instead of truncating b_bcount.
192221864bc5SMatthew Dillon 	 */
192321864bc5SMatthew Dillon 	nread = bp->b_bcount - bp->b_resid;
192421864bc5SMatthew Dillon 	if (nread < ap->a_count)
192521864bc5SMatthew Dillon 		bzero((caddr_t)kva + nread, ap->a_count - nread);
192621864bc5SMatthew Dillon 	pmap_qremove(kva, pcount);
192721864bc5SMatthew Dillon 
192821864bc5SMatthew Dillon 	gotreqpage = 0;
192921864bc5SMatthew Dillon 	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
193021864bc5SMatthew Dillon 		nextoff = toff + PAGE_SIZE;
193121864bc5SMatthew Dillon 		m = ap->a_m[i];
193221864bc5SMatthew Dillon 
193321864bc5SMatthew Dillon 		m->flags &= ~PG_ZERO;
193421864bc5SMatthew Dillon 
1935cb1cf930SMatthew Dillon 		/*
1936cb1cf930SMatthew Dillon 		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
1937cb1cf930SMatthew Dillon 		 *	 pmap modified bit.  pmap modified bit should have
1938cb1cf930SMatthew Dillon 		 *	 already been cleared.
1939cb1cf930SMatthew Dillon 		 */
194021864bc5SMatthew Dillon 		if (nextoff <= nread) {
194121864bc5SMatthew Dillon 			m->valid = VM_PAGE_BITS_ALL;
194221864bc5SMatthew Dillon 			vm_page_undirty(m);
194321864bc5SMatthew Dillon 		} else if (toff < nread) {
194421864bc5SMatthew Dillon 			/*
194521864bc5SMatthew Dillon 			 * Since this is a VM request, we have to supply the
1946cb1cf930SMatthew Dillon 			 * unaligned offset to allow vm_page_set_valid()
194721864bc5SMatthew Dillon 			 * to zero sub-DEV_BSIZE'd portions of the page.
194821864bc5SMatthew Dillon 			 */
19491a54183bSMatthew Dillon 			vm_page_set_valid(m, 0, nread - toff);
19501a54183bSMatthew Dillon 			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
195121864bc5SMatthew Dillon 		} else {
195221864bc5SMatthew Dillon 			m->valid = 0;
195321864bc5SMatthew Dillon 			vm_page_undirty(m);
195421864bc5SMatthew Dillon 		}
195521864bc5SMatthew Dillon 
195621864bc5SMatthew Dillon 		if (i != ap->a_reqpage) {
195721864bc5SMatthew Dillon 			/*
195821864bc5SMatthew Dillon 			 * Just in case someone was asking for this page we
195921864bc5SMatthew Dillon 			 * now tell them that it is ok to use.
196021864bc5SMatthew Dillon 			 */
196121864bc5SMatthew Dillon 			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
196221864bc5SMatthew Dillon 				if (m->valid) {
196321864bc5SMatthew Dillon 					if (m->flags & PG_WANTED) {
196421864bc5SMatthew Dillon 						vm_page_activate(m);
196521864bc5SMatthew Dillon 					} else {
196621864bc5SMatthew Dillon 						vm_page_deactivate(m);
196721864bc5SMatthew Dillon 					}
196821864bc5SMatthew Dillon 					vm_page_wakeup(m);
196921864bc5SMatthew Dillon 				} else {
197021864bc5SMatthew Dillon 					vm_page_free(m);
197121864bc5SMatthew Dillon 				}
197221864bc5SMatthew Dillon 			} else {
197321864bc5SMatthew Dillon 				vm_page_free(m);
197421864bc5SMatthew Dillon 			}
197521864bc5SMatthew Dillon 		} else if (m->valid) {
197621864bc5SMatthew Dillon 			gotreqpage = 1;
197721864bc5SMatthew Dillon 			/*
197821864bc5SMatthew Dillon 			 * Since this is a VM request, we need to make the
197921864bc5SMatthew Dillon 			 * entire page presentable by zeroing invalid sections.
198021864bc5SMatthew Dillon 			 */
198121864bc5SMatthew Dillon 			if (m->valid != VM_PAGE_BITS_ALL)
198221864bc5SMatthew Dillon 			    vm_page_zero_invalid(m, FALSE);
198321864bc5SMatthew Dillon 		}
198421864bc5SMatthew Dillon 	}
198521864bc5SMatthew Dillon 	if (!gotreqpage) {
198621864bc5SMatthew Dillon 		m = ap->a_m[ap->a_reqpage];
198721864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_WARNING,
198821864bc5SMatthew Dillon 	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
198921864bc5SMatthew Dillon 			devtoname(vp->v_rdev), error, bp, bp->b_vp);
199021864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_WARNING,
199121864bc5SMatthew Dillon 	    "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
199221864bc5SMatthew Dillon 		    size, bp->b_resid, ap->a_count, m->valid);
199321864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_WARNING,
199421864bc5SMatthew Dillon 	    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
199521864bc5SMatthew Dillon 		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
199621864bc5SMatthew Dillon 		/*
199721864bc5SMatthew Dillon 		 * Free the buffer header back to the swap buffer pool.
199821864bc5SMatthew Dillon 		 */
199921864bc5SMatthew Dillon 		relpbuf(bp, NULL);
200021864bc5SMatthew Dillon 		return VM_PAGER_ERROR;
200121864bc5SMatthew Dillon 	}
200221864bc5SMatthew Dillon 	/*
200321864bc5SMatthew Dillon 	 * Free the buffer header back to the swap buffer pool.
200421864bc5SMatthew Dillon 	 */
200521864bc5SMatthew Dillon 	relpbuf(bp, NULL);
200607dfa375SAlex Hornung 	if (DEVFS_NODE(ap->a_vp))
200707dfa375SAlex Hornung 		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
200821864bc5SMatthew Dillon 	return VM_PAGER_OK;
200921864bc5SMatthew Dillon }
201021864bc5SMatthew Dillon 
201121864bc5SMatthew Dillon static __inline
201221864bc5SMatthew Dillon int
201321864bc5SMatthew Dillon sequential_heuristic(struct uio *uio, struct file *fp)
201421864bc5SMatthew Dillon {
201521864bc5SMatthew Dillon 	/*
201621864bc5SMatthew Dillon 	 * Sequential heuristic - detect sequential operation
201721864bc5SMatthew Dillon 	 */
201821864bc5SMatthew Dillon 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
201921864bc5SMatthew Dillon 	    uio->uio_offset == fp->f_nextoff) {
202021864bc5SMatthew Dillon 		/*
202121864bc5SMatthew Dillon 		 * XXX we assume that the filesystem block size is
202221864bc5SMatthew Dillon 		 * the default.  Not true, but still gives us a pretty
202321864bc5SMatthew Dillon 		 * good indicator of how sequential the read operations
202421864bc5SMatthew Dillon 		 * are.
202521864bc5SMatthew Dillon 		 */
2026898c91eeSMatthew Dillon 		int tmpseq = fp->f_seqcount;
2027898c91eeSMatthew Dillon 
202821864bc5SMatthew Dillon 		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
202921864bc5SMatthew Dillon 		if (tmpseq > IO_SEQMAX)
203021864bc5SMatthew Dillon 			tmpseq = IO_SEQMAX;
203121864bc5SMatthew Dillon 		fp->f_seqcount = tmpseq;
203221864bc5SMatthew Dillon 		return(fp->f_seqcount << IO_SEQSHIFT);
203321864bc5SMatthew Dillon 	}
203421864bc5SMatthew Dillon 
203521864bc5SMatthew Dillon 	/*
203621864bc5SMatthew Dillon 	 * Not sequential, quick draw-down of seqcount
203721864bc5SMatthew Dillon 	 */
203821864bc5SMatthew Dillon 	if (fp->f_seqcount > 1)
203921864bc5SMatthew Dillon 		fp->f_seqcount = 1;
204021864bc5SMatthew Dillon 	else
204121864bc5SMatthew Dillon 		fp->f_seqcount = 0;
204221864bc5SMatthew Dillon 	return(0);
204321864bc5SMatthew Dillon }
2044