xref: /dragonfly/sys/vfs/devfs/devfs_vnops.c (revision 21864bc5)
1*21864bc5SMatthew Dillon /*
2*21864bc5SMatthew Dillon  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
3*21864bc5SMatthew Dillon  *
4*21864bc5SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
5*21864bc5SMatthew Dillon  * by Alex Hornung <ahornung@gmail.com>
6*21864bc5SMatthew Dillon  *
7*21864bc5SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
8*21864bc5SMatthew Dillon  * modification, are permitted provided that the following conditions
9*21864bc5SMatthew Dillon  * are met:
10*21864bc5SMatthew Dillon  *
11*21864bc5SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
12*21864bc5SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
13*21864bc5SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
14*21864bc5SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in
15*21864bc5SMatthew Dillon  *    the documentation and/or other materials provided with the
16*21864bc5SMatthew Dillon  *    distribution.
17*21864bc5SMatthew Dillon  * 3. Neither the name of The DragonFly Project nor the names of its
18*21864bc5SMatthew Dillon  *    contributors may be used to endorse or promote products derived
19*21864bc5SMatthew Dillon  *    from this software without specific, prior written permission.
20*21864bc5SMatthew Dillon  *
21*21864bc5SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22*21864bc5SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23*21864bc5SMatthew Dillon  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24*21864bc5SMatthew Dillon  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25*21864bc5SMatthew Dillon  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26*21864bc5SMatthew Dillon  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27*21864bc5SMatthew Dillon  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28*21864bc5SMatthew Dillon  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29*21864bc5SMatthew Dillon  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30*21864bc5SMatthew Dillon  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31*21864bc5SMatthew Dillon  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32*21864bc5SMatthew Dillon  * SUCH DAMAGE.
33*21864bc5SMatthew Dillon  */
34*21864bc5SMatthew Dillon #include <sys/param.h>
35*21864bc5SMatthew Dillon #include <sys/systm.h>
36*21864bc5SMatthew Dillon #include <sys/time.h>
37*21864bc5SMatthew Dillon #include <sys/kernel.h>
38*21864bc5SMatthew Dillon #include <sys/lock.h>
39*21864bc5SMatthew Dillon #include <sys/fcntl.h>
40*21864bc5SMatthew Dillon #include <sys/proc.h>
41*21864bc5SMatthew Dillon #include <sys/priv.h>
42*21864bc5SMatthew Dillon #include <sys/signalvar.h>
43*21864bc5SMatthew Dillon #include <sys/vnode.h>
44*21864bc5SMatthew Dillon #include <sys/uio.h>
45*21864bc5SMatthew Dillon #include <sys/mount.h>
46*21864bc5SMatthew Dillon #include <sys/file.h>
47*21864bc5SMatthew Dillon #include <sys/fcntl.h>
48*21864bc5SMatthew Dillon #include <sys/namei.h>
49*21864bc5SMatthew Dillon #include <sys/dirent.h>
50*21864bc5SMatthew Dillon #include <sys/malloc.h>
51*21864bc5SMatthew Dillon #include <sys/stat.h>
52*21864bc5SMatthew Dillon #include <sys/reg.h>
53*21864bc5SMatthew Dillon #include <sys/buf2.h>
54*21864bc5SMatthew Dillon #include <vm/vm_pager.h>
55*21864bc5SMatthew Dillon #include <vm/vm_zone.h>
56*21864bc5SMatthew Dillon #include <vm/vm_object.h>
57*21864bc5SMatthew Dillon #include <sys/filio.h>
58*21864bc5SMatthew Dillon #include <sys/ttycom.h>
59*21864bc5SMatthew Dillon #include <sys/sysref2.h>
60*21864bc5SMatthew Dillon #include <sys/tty.h>
61*21864bc5SMatthew Dillon #include <vfs/devfs/devfs.h>
62*21864bc5SMatthew Dillon #include <sys/pioctl.h>
63*21864bc5SMatthew Dillon 
64*21864bc5SMatthew Dillon #include <machine/limits.h>
65*21864bc5SMatthew Dillon 
MALLOC_DECLARE(M_DEVFS);

/*
 * Cast helper so the generic devfs_badop() catch-all can be assigned
 * to any vop slot regardless of the slot's argument-struct type.
 */
#define DEVFS_BADOP	(void *)devfs_badop

/* Vnode operations shared between regular-file and device vnodes. */
static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

/* Namecache operations (resolution within the devfs topology). */
static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

/* Vnode operations specific to character-device (spec) vnodes. */
static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);


/* struct file (descriptor-level) operations for device files. */
static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t, struct ucred *);


static __inline int sequential_heuristic(struct uio *, struct file *);

/* Global devfs topology lock, defined in devfs_core.c. */
extern struct lock 		devfs_lock;
113*21864bc5SMatthew Dillon 
/*
 * devfs vnode operations for regular files (directories, symlinks and
 * other non-device nodes in the devfs topology).  Operations that make
 * no sense on devfs (create, link, mkdir, rename, ...) are routed to
 * the DEVFS_BADOP catch-all, which fails with EIO.
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =			DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_getattr,
	.vop_inactive =		devfs_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_nresolve,
	.vop_nlookupdotdot =	devfs_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		DEVFS_BADOP,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		DEVFS_BADOP,
	.vop_nsymlink =		devfs_nsymlink,
	.vop_open =			vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_read =			DEVFS_BADOP,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};
146*21864bc5SMatthew Dillon 
/*
 * devfs vnode operations for character devices.  I/O paths (read,
 * write, ioctl, poll, strategy, ...) go through the devfs_spec_*
 * wrappers, which forward to the underlying cdev; directory-style
 * operations (readdir, readlink) are invalid here and use the
 * DEVFS_BADOP catch-all.
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =			devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_inactive,
	.vop_open =			devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_poll =			devfs_spec_poll,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =			devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};
175*21864bc5SMatthew Dillon 
/* Exported pointer to the device vop table for use by other devfs code. */
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

/*
 * Descriptor-level fileops for open device files; these bypass the
 * vnode layer and call the devfs_specf_* wrappers directly.
 */
struct fileops devfs_dev_fileops = {
	.fo_read = devfs_specf_read,
	.fo_write = devfs_specf_write,
	.fo_ioctl = devfs_specf_ioctl,
	.fo_poll = devfs_specf_poll,
	.fo_kqfilter = devfs_specf_kqfilter,
	.fo_stat = devfs_specf_stat,
	.fo_close = devfs_specf_close,
	.fo_shutdown = nofo_shutdown
};
188*21864bc5SMatthew Dillon 
189*21864bc5SMatthew Dillon 
190*21864bc5SMatthew Dillon /*
191*21864bc5SMatthew Dillon  * generic entry point for unsupported operations
192*21864bc5SMatthew Dillon  */
193*21864bc5SMatthew Dillon static int
194*21864bc5SMatthew Dillon devfs_badop(struct vop_generic_args *ap)
195*21864bc5SMatthew Dillon {
196*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs: specified vnode operation is not implemented (yet)\n");
197*21864bc5SMatthew Dillon 	return (EIO);
198*21864bc5SMatthew Dillon }
199*21864bc5SMatthew Dillon 
200*21864bc5SMatthew Dillon 
201*21864bc5SMatthew Dillon static int
202*21864bc5SMatthew Dillon devfs_access(struct vop_access_args *ap)
203*21864bc5SMatthew Dillon {
204*21864bc5SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
205*21864bc5SMatthew Dillon 	int error = 0;
206*21864bc5SMatthew Dillon 
207*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_access() called!\n");
208*21864bc5SMatthew Dillon 
209*21864bc5SMatthew Dillon 	error = vop_helper_access(ap, node->uid, node->gid,
210*21864bc5SMatthew Dillon 				node->mode, node->flags);
211*21864bc5SMatthew Dillon 
212*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_access ruled over %s: %d\n", "UNKNOWN", error);
213*21864bc5SMatthew Dillon 
214*21864bc5SMatthew Dillon 	return error;
215*21864bc5SMatthew Dillon 	//XXX: consider possible special cases? terminal, ...?
216*21864bc5SMatthew Dillon }
217*21864bc5SMatthew Dillon 
218*21864bc5SMatthew Dillon 
219*21864bc5SMatthew Dillon static int
220*21864bc5SMatthew Dillon devfs_inactive(struct vop_inactive_args *ap)
221*21864bc5SMatthew Dillon {
222*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_inactive() called!\n");
223*21864bc5SMatthew Dillon 
224*21864bc5SMatthew Dillon 	/* Check if the devfs_node is not linked anymore into the topology.
225*21864bc5SMatthew Dillon 	 * If this is the case, we suggest that the vnode is recycled. */
226*21864bc5SMatthew Dillon 	if (DEVFS_NODE(ap->a_vp)) {
227*21864bc5SMatthew Dillon 		if ((DEVFS_NODE(ap->a_vp)->flags & DEVFS_NODE_LINKED) == 0) {
228*21864bc5SMatthew Dillon 			vrecycle(ap->a_vp);
229*21864bc5SMatthew Dillon 		}
230*21864bc5SMatthew Dillon 	}
231*21864bc5SMatthew Dillon 
232*21864bc5SMatthew Dillon 	return 0;
233*21864bc5SMatthew Dillon }
234*21864bc5SMatthew Dillon 
235*21864bc5SMatthew Dillon 
236*21864bc5SMatthew Dillon static int
237*21864bc5SMatthew Dillon devfs_reclaim(struct vop_reclaim_args *ap)
238*21864bc5SMatthew Dillon {
239*21864bc5SMatthew Dillon 	int locked = 0;
240*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_reclaim() called!\n");
241*21864bc5SMatthew Dillon 
242*21864bc5SMatthew Dillon 	/* Check if it is locked already. if not, we acquire the devfs lock */
243*21864bc5SMatthew Dillon 	if (!(lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE) {
244*21864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_EXCLUSIVE);
245*21864bc5SMatthew Dillon 		locked = 1;
246*21864bc5SMatthew Dillon 	}
247*21864bc5SMatthew Dillon 
248*21864bc5SMatthew Dillon 	/* Check if the devfs_node is not linked anymore into the topology.
249*21864bc5SMatthew Dillon 	 * If this is the case, we get rid of the devfs_node. */
250*21864bc5SMatthew Dillon 	if (DEVFS_NODE(ap->a_vp)) {
251*21864bc5SMatthew Dillon 		if ((DEVFS_NODE(ap->a_vp)->flags & DEVFS_NODE_LINKED) == 0) {
252*21864bc5SMatthew Dillon 				devfs_freep(DEVFS_NODE(ap->a_vp));
253*21864bc5SMatthew Dillon 				//devfs_tracer_del_orphan(DEVFS_NODE(ap->a_vp));
254*21864bc5SMatthew Dillon 		}
255*21864bc5SMatthew Dillon 
256*21864bc5SMatthew Dillon 		/* unlink vnode <--> devfs_node */
257*21864bc5SMatthew Dillon 		DEVFS_NODE(ap->a_vp)->v_node = NULL;
258*21864bc5SMatthew Dillon 	}
259*21864bc5SMatthew Dillon 
260*21864bc5SMatthew Dillon 	/* If we acquired the lock, we also get rid of it */
261*21864bc5SMatthew Dillon 	if (locked)
262*21864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_RELEASE);
263*21864bc5SMatthew Dillon 
264*21864bc5SMatthew Dillon 	ap->a_vp->v_data = NULL;
265*21864bc5SMatthew Dillon 	/* avoid a panic on release because of not adding it with v_associate_rdev */
266*21864bc5SMatthew Dillon 	ap->a_vp->v_rdev = NULL;
267*21864bc5SMatthew Dillon 
268*21864bc5SMatthew Dillon 	return 0;
269*21864bc5SMatthew Dillon }
270*21864bc5SMatthew Dillon 
271*21864bc5SMatthew Dillon 
272*21864bc5SMatthew Dillon static int
273*21864bc5SMatthew Dillon devfs_readdir(struct vop_readdir_args *ap)
274*21864bc5SMatthew Dillon {
275*21864bc5SMatthew Dillon 	struct devfs_node *node;
276*21864bc5SMatthew Dillon 	int error2 = 0, r, error = 0;
277*21864bc5SMatthew Dillon 
278*21864bc5SMatthew Dillon 	int cookie_index;
279*21864bc5SMatthew Dillon 	int ncookies;
280*21864bc5SMatthew Dillon 	off_t *cookies;
281*21864bc5SMatthew Dillon 	off_t saveoff;
282*21864bc5SMatthew Dillon 
283*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");
284*21864bc5SMatthew Dillon 
285*21864bc5SMatthew Dillon 	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
286*21864bc5SMatthew Dillon 		return (EINVAL);
287*21864bc5SMatthew Dillon 	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
288*21864bc5SMatthew Dillon 		return (error);
289*21864bc5SMatthew Dillon 
290*21864bc5SMatthew Dillon 	saveoff = ap->a_uio->uio_offset;
291*21864bc5SMatthew Dillon 
292*21864bc5SMatthew Dillon 	if (ap->a_ncookies) {
293*21864bc5SMatthew Dillon 		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
294*21864bc5SMatthew Dillon 		if (ncookies > 256)
295*21864bc5SMatthew Dillon 			ncookies = 256;
296*21864bc5SMatthew Dillon 		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
297*21864bc5SMatthew Dillon 		cookie_index = 0;
298*21864bc5SMatthew Dillon 	} else {
299*21864bc5SMatthew Dillon 		ncookies = -1;
300*21864bc5SMatthew Dillon 		cookies = NULL;
301*21864bc5SMatthew Dillon 		cookie_index = 0;
302*21864bc5SMatthew Dillon 	}
303*21864bc5SMatthew Dillon 
304*21864bc5SMatthew Dillon 	nanotime(&DEVFS_NODE(ap->a_vp)->atime);
305*21864bc5SMatthew Dillon 
306*21864bc5SMatthew Dillon 	if (saveoff == 0) {
307*21864bc5SMatthew Dillon 		r = vop_write_dirent(&error, ap->a_uio, DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 1, ".");
308*21864bc5SMatthew Dillon 		if (r)
309*21864bc5SMatthew Dillon 			goto done;
310*21864bc5SMatthew Dillon 		if (cookies)
311*21864bc5SMatthew Dillon 			cookies[cookie_index] = saveoff;
312*21864bc5SMatthew Dillon 		saveoff++;
313*21864bc5SMatthew Dillon 		cookie_index++;
314*21864bc5SMatthew Dillon 		if (cookie_index == ncookies)
315*21864bc5SMatthew Dillon 			goto done;
316*21864bc5SMatthew Dillon 	}
317*21864bc5SMatthew Dillon 
318*21864bc5SMatthew Dillon 	if (saveoff == 1) {
319*21864bc5SMatthew Dillon 		if (DEVFS_NODE(ap->a_vp)->parent) {
320*21864bc5SMatthew Dillon 			r = vop_write_dirent(&error, ap->a_uio,
321*21864bc5SMatthew Dillon 					     DEVFS_NODE(ap->a_vp)->d_dir.d_ino,
322*21864bc5SMatthew Dillon 					     DT_DIR, 2, "..");
323*21864bc5SMatthew Dillon 		} else {
324*21864bc5SMatthew Dillon 			r = vop_write_dirent(&error, ap->a_uio,
325*21864bc5SMatthew Dillon 					     DEVFS_NODE(ap->a_vp)->d_dir.d_ino, DT_DIR, 2, "..");
326*21864bc5SMatthew Dillon 		}
327*21864bc5SMatthew Dillon 		if (r)
328*21864bc5SMatthew Dillon 			goto done;
329*21864bc5SMatthew Dillon 		if (cookies)
330*21864bc5SMatthew Dillon 			cookies[cookie_index] = saveoff;
331*21864bc5SMatthew Dillon 		saveoff++;
332*21864bc5SMatthew Dillon 		cookie_index++;
333*21864bc5SMatthew Dillon 		if (cookie_index == ncookies)
334*21864bc5SMatthew Dillon 			goto done;
335*21864bc5SMatthew Dillon 	}
336*21864bc5SMatthew Dillon 
337*21864bc5SMatthew Dillon 	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_vp)), link) {
338*21864bc5SMatthew Dillon 		if ((node->flags & DEVFS_HIDDEN) || (node->flags & DEVFS_INVISIBLE))
339*21864bc5SMatthew Dillon 			continue;
340*21864bc5SMatthew Dillon 
341*21864bc5SMatthew Dillon 		if (node->cookie < saveoff)
342*21864bc5SMatthew Dillon 			continue;
343*21864bc5SMatthew Dillon /*
344*21864bc5SMatthew Dillon 		if (skip > 0) {
345*21864bc5SMatthew Dillon 			skip--;
346*21864bc5SMatthew Dillon 			continue;
347*21864bc5SMatthew Dillon 		}
348*21864bc5SMatthew Dillon */
349*21864bc5SMatthew Dillon 		saveoff = node->cookie;
350*21864bc5SMatthew Dillon 
351*21864bc5SMatthew Dillon 		error2 = vop_write_dirent(&error, ap->a_uio,
352*21864bc5SMatthew Dillon 			node->d_dir.d_ino, node->d_dir.d_type,
353*21864bc5SMatthew Dillon 			node->d_dir.d_namlen, node->d_dir.d_name);
354*21864bc5SMatthew Dillon 
355*21864bc5SMatthew Dillon 		if(error2)
356*21864bc5SMatthew Dillon 			break;
357*21864bc5SMatthew Dillon 
358*21864bc5SMatthew Dillon 		saveoff++;
359*21864bc5SMatthew Dillon 
360*21864bc5SMatthew Dillon 		if (cookies)
361*21864bc5SMatthew Dillon 			cookies[cookie_index] = node->cookie;
362*21864bc5SMatthew Dillon 		++cookie_index;
363*21864bc5SMatthew Dillon 		if (cookie_index == ncookies)
364*21864bc5SMatthew Dillon 			break;
365*21864bc5SMatthew Dillon 
366*21864bc5SMatthew Dillon 		//count++;
367*21864bc5SMatthew Dillon 	}
368*21864bc5SMatthew Dillon 
369*21864bc5SMatthew Dillon done:
370*21864bc5SMatthew Dillon 	vn_unlock(ap->a_vp);
371*21864bc5SMatthew Dillon 
372*21864bc5SMatthew Dillon 	ap->a_uio->uio_offset = saveoff;
373*21864bc5SMatthew Dillon 	if (error && cookie_index == 0) {
374*21864bc5SMatthew Dillon 		if (cookies) {
375*21864bc5SMatthew Dillon 			kfree(cookies, M_TEMP);
376*21864bc5SMatthew Dillon 			*ap->a_ncookies = 0;
377*21864bc5SMatthew Dillon 			*ap->a_cookies = NULL;
378*21864bc5SMatthew Dillon 		}
379*21864bc5SMatthew Dillon 	} else {
380*21864bc5SMatthew Dillon 		if (cookies) {
381*21864bc5SMatthew Dillon 			*ap->a_ncookies = cookie_index;
382*21864bc5SMatthew Dillon 			*ap->a_cookies = cookies;
383*21864bc5SMatthew Dillon 		}
384*21864bc5SMatthew Dillon 	}
385*21864bc5SMatthew Dillon 	return (error);
386*21864bc5SMatthew Dillon }
387*21864bc5SMatthew Dillon 
388*21864bc5SMatthew Dillon 
/*
 * vop_nresolve: resolve a name inside a devfs directory.
 *
 * Walks the directory node's child list looking for an exact name
 * match.  Symlink-type nodes with a live link_target are transparently
 * followed.  If the name is not found, devfs_clone() is given one
 * chance to create the node (cloning devices such as ptys), after
 * which the search is retried once via the `search` label.  On success
 * the resolved vnode is entered into the namecache; on failure the
 * namecache entry is negative-cached and ENOENT is returned.
 *
 * The devfs lock is held for the whole operation.
 */
static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	//void *ident;
	int error = 0;
	int len;
	int hidden = 0;	/* set when the match exists but is DEVFS_HIDDEN */

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve() called!\n");

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* Only root and directory nodes can contain entries. */
	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
		(DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: ap->a_dvp is not a dir!!!\n");
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

search:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -search- \n");
	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve: found: %s\n", ncp->nc_name);
				found = node;
				break;
			}
		}
	}

	if (found) {
		/* Follow devfs-internal links to their target node. */
		if ((found->node_type == Plink) && (found->link_target))
			found = found->link_target;

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
		else
			hidden = 1;
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -2- \n");
	}

	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -3- %c%c%c\n", ncp->nc_name[0], ncp->nc_name[1], ncp->nc_name[2]);
	if (vp == NULL) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve vp==NULL \n");
		/* XXX: len is int, devfs_clone expects size_t*, not int* */
		/*
		 * Not found (and not hidden): give the clone handlers a
		 * chance to create the device, then retry the lookup.
		 * devfs_clone() returns 0 on success.
		 */
		if ((!hidden) && (!devfs_clone(ncp->nc_name, &len, NULL, 0, ap->a_cred))) {
			goto search;
		}
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -4- \n");
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -5- \n");
		goto out;

	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -6- \n");
	KKASSERT(vp);
	/* Enter the resolved vnode into the namecache unlocked. */
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -9- \n");
out:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nresolve -end:10- failed? %s \n", (error)?"FAILED!":"OK!");
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
463*21864bc5SMatthew Dillon 
464*21864bc5SMatthew Dillon 
465*21864bc5SMatthew Dillon static int
466*21864bc5SMatthew Dillon devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
467*21864bc5SMatthew Dillon {
468*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nlookupdotdot() called!\n");
469*21864bc5SMatthew Dillon 	*ap->a_vpp = NULL;
470*21864bc5SMatthew Dillon 
471*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
472*21864bc5SMatthew Dillon 	if (DEVFS_NODE(ap->a_dvp)->parent != NULL) {
473*21864bc5SMatthew Dillon 		devfs_allocv(/*ap->a_dvp->v_mount, */ap->a_vpp, DEVFS_NODE(ap->a_dvp)->parent);
474*21864bc5SMatthew Dillon 		vn_unlock(*ap->a_vpp);
475*21864bc5SMatthew Dillon 	}
476*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
477*21864bc5SMatthew Dillon 
478*21864bc5SMatthew Dillon 	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
479*21864bc5SMatthew Dillon }
480*21864bc5SMatthew Dillon 
481*21864bc5SMatthew Dillon 
482*21864bc5SMatthew Dillon static int
483*21864bc5SMatthew Dillon devfs_getattr(struct vop_getattr_args *ap)
484*21864bc5SMatthew Dillon {
485*21864bc5SMatthew Dillon 	struct vattr *vap = ap->a_vap;
486*21864bc5SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
487*21864bc5SMatthew Dillon 	int error = 0;
488*21864bc5SMatthew Dillon 
489*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_getattr() called for %s!\n", DEVFS_NODE(ap->a_vp)->d_dir.d_name);
490*21864bc5SMatthew Dillon 
491*21864bc5SMatthew Dillon 	/* start by zeroing out the attributes */
492*21864bc5SMatthew Dillon 	VATTR_NULL(vap);
493*21864bc5SMatthew Dillon 
494*21864bc5SMatthew Dillon 	/* next do all the common fields */
495*21864bc5SMatthew Dillon 	vap->va_type = ap->a_vp->v_type;
496*21864bc5SMatthew Dillon 	vap->va_mode = node->mode;
497*21864bc5SMatthew Dillon 	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ;
498*21864bc5SMatthew Dillon 	vap->va_flags = 0; //what should this be?
499*21864bc5SMatthew Dillon 	vap->va_blocksize = DEV_BSIZE;
500*21864bc5SMatthew Dillon 	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);
501*21864bc5SMatthew Dillon 
502*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_getattr() check dev %s!\n", (DEVFS_NODE(ap->a_vp)->d_dev)?(DEVFS_NODE(ap->a_vp)->d_dev->si_name):"Not a device");
503*21864bc5SMatthew Dillon 
504*21864bc5SMatthew Dillon 	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
505*21864bc5SMatthew Dillon 
506*21864bc5SMatthew Dillon 
507*21864bc5SMatthew Dillon 	vap->va_atime = node->atime;
508*21864bc5SMatthew Dillon 	vap->va_mtime = node->mtime;
509*21864bc5SMatthew Dillon 	vap->va_ctime = node->ctime;
510*21864bc5SMatthew Dillon 
511*21864bc5SMatthew Dillon 	vap->va_nlink = 1; /* number of references to file */
512*21864bc5SMatthew Dillon 
513*21864bc5SMatthew Dillon 	vap->va_uid = node->uid;
514*21864bc5SMatthew Dillon 	vap->va_gid = node->gid;
515*21864bc5SMatthew Dillon 
516*21864bc5SMatthew Dillon 	vap->va_rmajor = 0;
517*21864bc5SMatthew Dillon 	vap->va_rminor = 0;
518*21864bc5SMatthew Dillon 
519*21864bc5SMatthew Dillon 	if ((DEVFS_NODE(ap->a_vp)->node_type == Pdev) &&
520*21864bc5SMatthew Dillon 		(DEVFS_NODE(ap->a_vp)->d_dev))  {
521*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "getattr: dev is: %p\n", DEVFS_NODE(ap->a_vp)->d_dev);
522*21864bc5SMatthew Dillon 		reference_dev(DEVFS_NODE(ap->a_vp)->d_dev);
523*21864bc5SMatthew Dillon 		vap->va_fsid = dev2udev(DEVFS_NODE(ap->a_vp)->d_dev);
524*21864bc5SMatthew Dillon 		vap->va_rminor = DEVFS_NODE(ap->a_vp)->d_dev->si_uminor;
525*21864bc5SMatthew Dillon 		release_dev(DEVFS_NODE(ap->a_vp)->d_dev);
526*21864bc5SMatthew Dillon 	}
527*21864bc5SMatthew Dillon 
528*21864bc5SMatthew Dillon 	/* For a softlink the va_size is the length of the softlink */
529*21864bc5SMatthew Dillon 	if (DEVFS_NODE(ap->a_vp)->symlink_name != 0) {
530*21864bc5SMatthew Dillon 		vap->va_size = DEVFS_NODE(ap->a_vp)->symlink_namelen;
531*21864bc5SMatthew Dillon 	}
532*21864bc5SMatthew Dillon 	nanotime(&node->atime);
533*21864bc5SMatthew Dillon 	return (error); //XXX: set error usefully
534*21864bc5SMatthew Dillon }
535*21864bc5SMatthew Dillon 
536*21864bc5SMatthew Dillon 
537*21864bc5SMatthew Dillon static int
538*21864bc5SMatthew Dillon devfs_setattr(struct vop_setattr_args *ap)
539*21864bc5SMatthew Dillon {
540*21864bc5SMatthew Dillon 	struct devfs_node *node;
541*21864bc5SMatthew Dillon 	struct vattr *vap;
542*21864bc5SMatthew Dillon 	int error = 0;
543*21864bc5SMatthew Dillon 
544*21864bc5SMatthew Dillon 
545*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr() called!\n");
546*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
547*21864bc5SMatthew Dillon 
548*21864bc5SMatthew Dillon 	vap = ap->a_vap;
549*21864bc5SMatthew Dillon 	node = DEVFS_NODE(ap->a_vp);
550*21864bc5SMatthew Dillon 
551*21864bc5SMatthew Dillon 	if (vap->va_uid != (uid_t)VNOVAL) {
552*21864bc5SMatthew Dillon 		if ((ap->a_cred->cr_uid != node->uid) &&
553*21864bc5SMatthew Dillon 			(!groupmember(node->gid, ap->a_cred))) {
554*21864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_CHOWN);
555*21864bc5SMatthew Dillon 			if (error) {
556*21864bc5SMatthew Dillon 				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -1-\n");
557*21864bc5SMatthew Dillon 				goto out;
558*21864bc5SMatthew Dillon 			}
559*21864bc5SMatthew Dillon 		}
560*21864bc5SMatthew Dillon 		node->uid = vap->va_uid;
561*21864bc5SMatthew Dillon 	}
562*21864bc5SMatthew Dillon 
563*21864bc5SMatthew Dillon 	if (vap->va_gid != (uid_t)VNOVAL) {
564*21864bc5SMatthew Dillon 		if ((ap->a_cred->cr_uid != node->uid) &&
565*21864bc5SMatthew Dillon 			(!groupmember(node->gid, ap->a_cred))) {
566*21864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_CHOWN);
567*21864bc5SMatthew Dillon 			if (error) {
568*21864bc5SMatthew Dillon 				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -2-\n");
569*21864bc5SMatthew Dillon 				goto out;
570*21864bc5SMatthew Dillon 			}
571*21864bc5SMatthew Dillon 		}
572*21864bc5SMatthew Dillon 		node->gid = vap->va_gid;
573*21864bc5SMatthew Dillon 	}
574*21864bc5SMatthew Dillon 
575*21864bc5SMatthew Dillon 	if (vap->va_mode != (mode_t)VNOVAL) {
576*21864bc5SMatthew Dillon 		if (ap->a_cred->cr_uid != node->uid) {
577*21864bc5SMatthew Dillon 			error = priv_check(curthread, PRIV_VFS_ADMIN);
578*21864bc5SMatthew Dillon 			if (error) {
579*21864bc5SMatthew Dillon 				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_setattr, erroring out -3-\n");
580*21864bc5SMatthew Dillon 				goto out;
581*21864bc5SMatthew Dillon 			}
582*21864bc5SMatthew Dillon 		}
583*21864bc5SMatthew Dillon 		node->mode = vap->va_mode;
584*21864bc5SMatthew Dillon 	}
585*21864bc5SMatthew Dillon 
586*21864bc5SMatthew Dillon out:
587*21864bc5SMatthew Dillon 	nanotime(&node->mtime);
588*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
589*21864bc5SMatthew Dillon 	return error;
590*21864bc5SMatthew Dillon }
591*21864bc5SMatthew Dillon 
592*21864bc5SMatthew Dillon 
593*21864bc5SMatthew Dillon static int
594*21864bc5SMatthew Dillon devfs_readlink(struct vop_readlink_args *ap)
595*21864bc5SMatthew Dillon {
596*21864bc5SMatthew Dillon 	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
597*21864bc5SMatthew Dillon 
598*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readlink()  called!\n");
599*21864bc5SMatthew Dillon 
600*21864bc5SMatthew Dillon 	return (uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio));
601*21864bc5SMatthew Dillon }
602*21864bc5SMatthew Dillon 
603*21864bc5SMatthew Dillon 
604*21864bc5SMatthew Dillon static int
605*21864bc5SMatthew Dillon devfs_print(struct vop_print_args *ap)
606*21864bc5SMatthew Dillon {
607*21864bc5SMatthew Dillon 	//struct devfs_node *node = DEVFS_NODE(ap->a_vp);
608*21864bc5SMatthew Dillon 
609*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_print() called!\n");
610*21864bc5SMatthew Dillon 
611*21864bc5SMatthew Dillon 	//XXX: print some useful debugging about node.
612*21864bc5SMatthew Dillon 	return (0);
613*21864bc5SMatthew Dillon }
614*21864bc5SMatthew Dillon 
615*21864bc5SMatthew Dillon 
616*21864bc5SMatthew Dillon static int
617*21864bc5SMatthew Dillon devfs_nsymlink(struct vop_nsymlink_args *ap)
618*21864bc5SMatthew Dillon {
619*21864bc5SMatthew Dillon 	size_t targetlen = strlen(ap->a_target);
620*21864bc5SMatthew Dillon 
621*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink() called!\n");
622*21864bc5SMatthew Dillon 
623*21864bc5SMatthew Dillon 	ap->a_vap->va_type = VLNK;
624*21864bc5SMatthew Dillon 
625*21864bc5SMatthew Dillon 	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
626*21864bc5SMatthew Dillon 		(DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
627*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nsymlink: ap->a_dvp is not a dir!!!\n");
628*21864bc5SMatthew Dillon 		goto out;
629*21864bc5SMatthew Dillon 	}
630*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
631*21864bc5SMatthew Dillon 	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
632*21864bc5SMatthew Dillon 				ap->a_nch->ncp->nc_name, DEVFS_NODE(ap->a_dvp), NULL);
633*21864bc5SMatthew Dillon 
634*21864bc5SMatthew Dillon 	if (*ap->a_vpp) {
635*21864bc5SMatthew Dillon 		DEVFS_NODE(*ap->a_vpp)->flags |= DEVFS_USER_CREATED;
636*21864bc5SMatthew Dillon 
637*21864bc5SMatthew Dillon 		DEVFS_NODE(*ap->a_vpp)->symlink_namelen = targetlen;
638*21864bc5SMatthew Dillon 		DEVFS_NODE(*ap->a_vpp)->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
639*21864bc5SMatthew Dillon 		memcpy(DEVFS_NODE(*ap->a_vpp)->symlink_name, ap->a_target, targetlen);
640*21864bc5SMatthew Dillon 		DEVFS_NODE(*ap->a_vpp)->symlink_name[targetlen] = '\0';
641*21864bc5SMatthew Dillon 		cache_setunresolved(ap->a_nch);
642*21864bc5SMatthew Dillon 		//problematic to use cache_* inside lockmgr() ? Probably not...
643*21864bc5SMatthew Dillon 		cache_setvp(ap->a_nch, *ap->a_vpp);
644*21864bc5SMatthew Dillon 	}
645*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
646*21864bc5SMatthew Dillon out:
647*21864bc5SMatthew Dillon 	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
648*21864bc5SMatthew Dillon 
649*21864bc5SMatthew Dillon }
650*21864bc5SMatthew Dillon 
651*21864bc5SMatthew Dillon 
652*21864bc5SMatthew Dillon static int
653*21864bc5SMatthew Dillon devfs_nremove(struct vop_nremove_args *ap)
654*21864bc5SMatthew Dillon {
655*21864bc5SMatthew Dillon 	struct devfs_node *node;
656*21864bc5SMatthew Dillon 	struct namecache *ncp;
657*21864bc5SMatthew Dillon 	//struct vnode *vp = NULL;
658*21864bc5SMatthew Dillon 	int error = ENOENT;
659*21864bc5SMatthew Dillon 
660*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove() called!\n");
661*21864bc5SMatthew Dillon 
662*21864bc5SMatthew Dillon 	ncp = ap->a_nch->ncp;
663*21864bc5SMatthew Dillon 
664*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_EXCLUSIVE);
665*21864bc5SMatthew Dillon 
666*21864bc5SMatthew Dillon 	if ((DEVFS_NODE(ap->a_dvp)->node_type != Proot) &&
667*21864bc5SMatthew Dillon 		(DEVFS_NODE(ap->a_dvp)->node_type != Pdir)) {
668*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_nremove: ap->a_dvp is not a dir!!!\n");
669*21864bc5SMatthew Dillon 		goto out;
670*21864bc5SMatthew Dillon 	}
671*21864bc5SMatthew Dillon 
672*21864bc5SMatthew Dillon 	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(DEVFS_NODE(ap->a_dvp)), link)	{
673*21864bc5SMatthew Dillon 		if (ncp->nc_nlen == node->d_dir.d_namlen) {
674*21864bc5SMatthew Dillon 			if (!memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen)) {
675*21864bc5SMatthew Dillon 				// allow only removal of user created stuff (e.g. symlinks)
676*21864bc5SMatthew Dillon 				if ((node->flags & DEVFS_USER_CREATED) == 0) {
677*21864bc5SMatthew Dillon 					error = EPERM;
678*21864bc5SMatthew Dillon 					goto out;
679*21864bc5SMatthew Dillon 				} else {
680*21864bc5SMatthew Dillon 					if (node->v_node)
681*21864bc5SMatthew Dillon 						cache_inval_vp(node->v_node, CINV_DESTROY);
682*21864bc5SMatthew Dillon 
683*21864bc5SMatthew Dillon 					devfs_unlinkp(node);
684*21864bc5SMatthew Dillon 					error = 0;
685*21864bc5SMatthew Dillon 					break;
686*21864bc5SMatthew Dillon 				}
687*21864bc5SMatthew Dillon 			}
688*21864bc5SMatthew Dillon 		}
689*21864bc5SMatthew Dillon 	}
690*21864bc5SMatthew Dillon 
691*21864bc5SMatthew Dillon 	cache_setunresolved(ap->a_nch);
692*21864bc5SMatthew Dillon 	cache_setvp(ap->a_nch, NULL);
693*21864bc5SMatthew Dillon 	//cache_inval_vp(node->v_node, CINV_DESTROY);
694*21864bc5SMatthew Dillon 
695*21864bc5SMatthew Dillon out:
696*21864bc5SMatthew Dillon 	lockmgr(&devfs_lock, LK_RELEASE);
697*21864bc5SMatthew Dillon 	//vrele(ap->a_dvp);
698*21864bc5SMatthew Dillon 	//vput(ap->a_dvp);
699*21864bc5SMatthew Dillon 	return error;
700*21864bc5SMatthew Dillon }
701*21864bc5SMatthew Dillon 
702*21864bc5SMatthew Dillon 
703*21864bc5SMatthew Dillon static int
704*21864bc5SMatthew Dillon devfs_spec_open(struct vop_open_args *ap)
705*21864bc5SMatthew Dillon {
706*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
707*21864bc5SMatthew Dillon 	cdev_t dev, ndev = NULL;
708*21864bc5SMatthew Dillon 	struct devfs_node *node = NULL;
709*21864bc5SMatthew Dillon 	int error = 0;
710*21864bc5SMatthew Dillon 	size_t len;
711*21864bc5SMatthew Dillon 
712*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called\n");
713*21864bc5SMatthew Dillon 
714*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp)) {
715*21864bc5SMatthew Dillon 		if (DEVFS_NODE(vp)->d_dev == NULL)
716*21864bc5SMatthew Dillon 			return ENXIO;
717*21864bc5SMatthew Dillon 	}
718*21864bc5SMatthew Dillon 
719*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1-\n");
720*21864bc5SMatthew Dillon 
721*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
722*21864bc5SMatthew Dillon 		return ENXIO;
723*21864bc5SMatthew Dillon 
724*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp) && ap->a_fp) {
725*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
726*21864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_EXCLUSIVE);
727*21864bc5SMatthew Dillon 		len = DEVFS_NODE(vp)->d_dir.d_namlen;
728*21864bc5SMatthew Dillon 		if (!(devfs_clone(DEVFS_NODE(vp)->d_dir.d_name, &len, &ndev, 1, ap->a_cred))) {
729*21864bc5SMatthew Dillon 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.2- |%s|\n", ndev->si_name);
730*21864bc5SMatthew Dillon 
731*21864bc5SMatthew Dillon 			dev = ndev;
732*21864bc5SMatthew Dillon 			reference_dev(dev);
733*21864bc5SMatthew Dillon 			devfs_link_dev(dev);
734*21864bc5SMatthew Dillon 			node = devfs_create_device_node(DEVFS_MNTDATA(vp->v_mount)->root_node, dev, NULL, NULL);
735*21864bc5SMatthew Dillon 			//node = devfs_allocp(Pdev, ndev->si_name, DEVFS_NODE(vp)->parent, vp->v_mount, dev);
736*21864bc5SMatthew Dillon 
737*21864bc5SMatthew Dillon 			devfs_debug(DEVFS_DEBUG_DEBUG, "parent here is: %s, node is: |%s|\n", (DEVFS_NODE(vp)->parent->node_type == Proot)?"ROOT!":DEVFS_NODE(vp)->parent->d_dir.d_name, node->d_dir.d_name);
738*21864bc5SMatthew Dillon 			devfs_debug(DEVFS_DEBUG_DEBUG, "test: %s\n", ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(DEVFS_NODE(vp)->parent), devfs_node_head)))->d_dir.d_name);
739*21864bc5SMatthew Dillon 
740*21864bc5SMatthew Dillon 			node->flags |= DEVFS_CLONED;
741*21864bc5SMatthew Dillon 			devfs_allocv(&vp, node);
742*21864bc5SMatthew Dillon 
743*21864bc5SMatthew Dillon 			ap->a_vp = vp;
744*21864bc5SMatthew Dillon 
745*21864bc5SMatthew Dillon 			//XXX: propagate to other devfs mounts?
746*21864bc5SMatthew Dillon 		}
747*21864bc5SMatthew Dillon 		lockmgr(&devfs_lock, LK_RELEASE);
748*21864bc5SMatthew Dillon 	}
749*21864bc5SMatthew Dillon 
750*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() called on %s! \n", dev->si_name);
751*21864bc5SMatthew Dillon 	/*
752*21864bc5SMatthew Dillon 	 * Make this field valid before any I/O in ->d_open
753*21864bc5SMatthew Dillon 	 */
754*21864bc5SMatthew Dillon 	if (!dev->si_iosize_max)
755*21864bc5SMatthew Dillon 		dev->si_iosize_max = DFLTPHYS;
756*21864bc5SMatthew Dillon 
757*21864bc5SMatthew Dillon 	if (dev_dflags(dev) & D_TTY)
758*21864bc5SMatthew Dillon 		vp->v_flag |= VISTTY;
759*21864bc5SMatthew Dillon 
760*21864bc5SMatthew Dillon 	vn_unlock(vp);
761*21864bc5SMatthew Dillon 	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
762*21864bc5SMatthew Dillon 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
763*21864bc5SMatthew Dillon 
764*21864bc5SMatthew Dillon 	if (error) {
765*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open() error out: %x\n", error);
766*21864bc5SMatthew Dillon 		if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_CLONED) == DEVFS_CLONED))
767*21864bc5SMatthew Dillon 			vput(vp);
768*21864bc5SMatthew Dillon 		return error;
769*21864bc5SMatthew Dillon 	}
770*21864bc5SMatthew Dillon 
771*21864bc5SMatthew Dillon 
772*21864bc5SMatthew Dillon 	if (dev_dflags(dev) & D_TTY) {
773*21864bc5SMatthew Dillon 		if (dev->si_tty) {
774*21864bc5SMatthew Dillon 			struct tty *tp;
775*21864bc5SMatthew Dillon 			tp = dev->si_tty;
776*21864bc5SMatthew Dillon 			if (!tp->t_stop) {
777*21864bc5SMatthew Dillon 				devfs_debug(DEVFS_DEBUG_DEBUG, "devfs: no t_stop\n");
778*21864bc5SMatthew Dillon 				tp->t_stop = nottystop;
779*21864bc5SMatthew Dillon 			}
780*21864bc5SMatthew Dillon 		}
781*21864bc5SMatthew Dillon 	}
782*21864bc5SMatthew Dillon 
783*21864bc5SMatthew Dillon 
784*21864bc5SMatthew Dillon 	if (vn_isdisk(vp, NULL)) {
785*21864bc5SMatthew Dillon 		if (!dev->si_bsize_phys)
786*21864bc5SMatthew Dillon 			dev->si_bsize_phys = DEV_BSIZE;
787*21864bc5SMatthew Dillon 		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
788*21864bc5SMatthew Dillon 	}
789*21864bc5SMatthew Dillon 
790*21864bc5SMatthew Dillon 	vop_stdopen(ap);
791*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
792*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
793*21864bc5SMatthew Dillon 
794*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_CLONED) == DEVFS_CLONED))
795*21864bc5SMatthew Dillon 		vn_unlock(vp);
796*21864bc5SMatthew Dillon 
797*21864bc5SMatthew Dillon 	/* Ugly pty magic, to make pty devices appear once they are opened */
798*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
799*21864bc5SMatthew Dillon 		DEVFS_NODE(vp)->flags &= ~DEVFS_INVISIBLE;
800*21864bc5SMatthew Dillon 
801*21864bc5SMatthew Dillon 	if (ap->a_fp) {
802*21864bc5SMatthew Dillon 		ap->a_fp->f_type = DTYPE_VNODE;
803*21864bc5SMatthew Dillon 		ap->a_fp->f_flag = ap->a_mode & FMASK;
804*21864bc5SMatthew Dillon 		ap->a_fp->f_ops = &devfs_dev_fileops;
805*21864bc5SMatthew Dillon 		ap->a_fp->f_data = vp;
806*21864bc5SMatthew Dillon 	}
807*21864bc5SMatthew Dillon 
808*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -end:3-\n");
809*21864bc5SMatthew Dillon 
810*21864bc5SMatthew Dillon 	return 0;
811*21864bc5SMatthew Dillon }
812*21864bc5SMatthew Dillon 
813*21864bc5SMatthew Dillon 
814*21864bc5SMatthew Dillon static int
815*21864bc5SMatthew Dillon devfs_spec_close(struct vop_close_args *ap)
816*21864bc5SMatthew Dillon {
817*21864bc5SMatthew Dillon 	struct proc *p = curproc;
818*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
819*21864bc5SMatthew Dillon 	cdev_t dev = vp->v_rdev;
820*21864bc5SMatthew Dillon 	int error = 0;
821*21864bc5SMatthew Dillon 	int needrelock;
822*21864bc5SMatthew Dillon 
823*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() called on %s! \n", dev->si_name);
824*21864bc5SMatthew Dillon 
825*21864bc5SMatthew Dillon 	/*
826*21864bc5SMatthew Dillon 	 * A couple of hacks for devices and tty devices.  The
827*21864bc5SMatthew Dillon 	 * vnode ref count cannot be used to figure out the
828*21864bc5SMatthew Dillon 	 * last close, but we can use v_opencount now that
829*21864bc5SMatthew Dillon 	 * revoke works properly.
830*21864bc5SMatthew Dillon 	 *
831*21864bc5SMatthew Dillon 	 * Detect the last close on a controlling terminal and clear
832*21864bc5SMatthew Dillon 	 * the session (half-close).
833*21864bc5SMatthew Dillon 	 */
834*21864bc5SMatthew Dillon 	if (dev)
835*21864bc5SMatthew Dillon 		reference_dev(dev);
836*21864bc5SMatthew Dillon 
837*21864bc5SMatthew Dillon 	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
838*21864bc5SMatthew Dillon 		p->p_session->s_ttyvp = NULL;
839*21864bc5SMatthew Dillon 		vrele(vp);
840*21864bc5SMatthew Dillon 	}
841*21864bc5SMatthew Dillon 
842*21864bc5SMatthew Dillon 	/*
843*21864bc5SMatthew Dillon 	 * Vnodes can be opened and closed multiple times.  Do not really
844*21864bc5SMatthew Dillon 	 * close the device unless (1) it is being closed forcibly,
845*21864bc5SMatthew Dillon 	 * (2) the device wants to track closes, or (3) this is the last
846*21864bc5SMatthew Dillon 	 * vnode doing its last close on the device.
847*21864bc5SMatthew Dillon 	 *
848*21864bc5SMatthew Dillon 	 * XXX the VXLOCK (force close) case can leave vnodes referencing
849*21864bc5SMatthew Dillon 	 * a closed device.  This might not occur now that our revoke is
850*21864bc5SMatthew Dillon 	 * fixed.
851*21864bc5SMatthew Dillon 	 */
852*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
853*21864bc5SMatthew Dillon 	if (dev && ((vp->v_flag & VRECLAIMED) ||
854*21864bc5SMatthew Dillon 	    (dev_dflags(dev) & D_TRACKCLOSE) ||
855*21864bc5SMatthew Dillon 	    (vp->v_opencount == 1))) {
856*21864bc5SMatthew Dillon 		needrelock = 0;
857*21864bc5SMatthew Dillon 		if (vn_islocked(vp)) {
858*21864bc5SMatthew Dillon 			needrelock = 1;
859*21864bc5SMatthew Dillon 			vn_unlock(vp);
860*21864bc5SMatthew Dillon 		}
861*21864bc5SMatthew Dillon 		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);
862*21864bc5SMatthew Dillon 		if (DEVFS_NODE(vp) && (DEVFS_NODE(vp)->flags & DEVFS_CLONED) == DEVFS_CLONED) {
863*21864bc5SMatthew Dillon 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close: last of the cloned ones, so delete node %s\n", dev->si_name);
864*21864bc5SMatthew Dillon 			devfs_unlinkp(DEVFS_NODE(vp));
865*21864bc5SMatthew Dillon 			devfs_freep(DEVFS_NODE(vp));
866*21864bc5SMatthew Dillon 			devfs_unlink_dev(dev);
867*21864bc5SMatthew Dillon 			release_dev(dev);
868*21864bc5SMatthew Dillon 			devfs_destroy_cdev(dev);
869*21864bc5SMatthew Dillon 		}
870*21864bc5SMatthew Dillon 		/* Ugly pty magic, to make pty devices disappear again once they are closed */
871*21864bc5SMatthew Dillon 		if (DEVFS_NODE(vp) && ((DEVFS_NODE(vp)->flags & DEVFS_PTY) == DEVFS_PTY))
872*21864bc5SMatthew Dillon 			DEVFS_NODE(vp)->flags |= DEVFS_INVISIBLE;
873*21864bc5SMatthew Dillon 
874*21864bc5SMatthew Dillon 		if (needrelock)
875*21864bc5SMatthew Dillon 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
876*21864bc5SMatthew Dillon 	} else {
877*21864bc5SMatthew Dillon 		error = 0;
878*21864bc5SMatthew Dillon 	}
879*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");
880*21864bc5SMatthew Dillon 	/*
881*21864bc5SMatthew Dillon 	 * Track the actual opens and closes on the vnode.  The last close
882*21864bc5SMatthew Dillon 	 * disassociates the rdev.  If the rdev is already disassociated or the
883*21864bc5SMatthew Dillon 	 * opencount is already 0, the vnode might have been revoked and no
884*21864bc5SMatthew Dillon 	 * further opencount tracking occurs.
885*21864bc5SMatthew Dillon 	 */
886*21864bc5SMatthew Dillon 	if (dev) {
887*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3- \n");
888*21864bc5SMatthew Dillon 		if (vp->v_opencount == 1) {
889*21864bc5SMatthew Dillon 			//vp->v_rdev = 0;
890*21864bc5SMatthew Dillon 			devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -3.5- \n");
891*21864bc5SMatthew Dillon 		}
892*21864bc5SMatthew Dillon 		release_dev(dev);
893*21864bc5SMatthew Dillon 	}
894*21864bc5SMatthew Dillon 	if (vp->v_opencount > 0) {
895*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -4- \n");
896*21864bc5SMatthew Dillon 		vop_stdclose(ap);
897*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -5- \n");
898*21864bc5SMatthew Dillon 	}
899*21864bc5SMatthew Dillon 
900*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -end:6- \n");
901*21864bc5SMatthew Dillon 	return(error);
902*21864bc5SMatthew Dillon 
903*21864bc5SMatthew Dillon }
904*21864bc5SMatthew Dillon 
905*21864bc5SMatthew Dillon 
906*21864bc5SMatthew Dillon static int
907*21864bc5SMatthew Dillon devfs_specf_close(struct file *fp)
908*21864bc5SMatthew Dillon {
909*21864bc5SMatthew Dillon 	int error;
910*21864bc5SMatthew Dillon 	struct vnode *vp = (struct vnode *)fp->f_data;
911*21864bc5SMatthew Dillon 
912*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_close() called! \n");
913*21864bc5SMatthew Dillon 	get_mplock();
914*21864bc5SMatthew Dillon 	fp->f_ops = &badfileops;
915*21864bc5SMatthew Dillon 
916*21864bc5SMatthew Dillon 	error = vn_close(vp, fp->f_flag);
917*21864bc5SMatthew Dillon 	rel_mplock();
918*21864bc5SMatthew Dillon 
919*21864bc5SMatthew Dillon 	return (error);
920*21864bc5SMatthew Dillon }
921*21864bc5SMatthew Dillon 
922*21864bc5SMatthew Dillon 
923*21864bc5SMatthew Dillon /*
924*21864bc5SMatthew Dillon  * Device-optimized file table vnode read routine.
925*21864bc5SMatthew Dillon  *
926*21864bc5SMatthew Dillon  * This bypasses the VOP table and talks directly to the device.  Most
927*21864bc5SMatthew Dillon  * filesystems just route to specfs and can make this optimization.
928*21864bc5SMatthew Dillon  *
929*21864bc5SMatthew Dillon  * MPALMOSTSAFE - acquires mplock
930*21864bc5SMatthew Dillon  */
931*21864bc5SMatthew Dillon static int
932*21864bc5SMatthew Dillon devfs_specf_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
933*21864bc5SMatthew Dillon {
934*21864bc5SMatthew Dillon 	struct vnode *vp;
935*21864bc5SMatthew Dillon 	int ioflag;
936*21864bc5SMatthew Dillon 	int error;
937*21864bc5SMatthew Dillon 	cdev_t dev;
938*21864bc5SMatthew Dillon 
939*21864bc5SMatthew Dillon 	get_mplock();
940*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read() called! \n");
941*21864bc5SMatthew Dillon 	KASSERT(uio->uio_td == curthread,
942*21864bc5SMatthew Dillon 		("uio_td %p is not td %p", uio->uio_td, curthread));
943*21864bc5SMatthew Dillon 
944*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
945*21864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
946*21864bc5SMatthew Dillon 		error = EBADF;
947*21864bc5SMatthew Dillon 		goto done;
948*21864bc5SMatthew Dillon 	}
949*21864bc5SMatthew Dillon 
950*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
951*21864bc5SMatthew Dillon 		error = EBADF;
952*21864bc5SMatthew Dillon 		goto done;
953*21864bc5SMatthew Dillon 	}
954*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read() called! for dev %s\n", dev->si_name);
955*21864bc5SMatthew Dillon 
956*21864bc5SMatthew Dillon 	reference_dev(dev);
957*21864bc5SMatthew Dillon 
958*21864bc5SMatthew Dillon 	if (uio->uio_resid == 0) {
959*21864bc5SMatthew Dillon 		error = 0;
960*21864bc5SMatthew Dillon 		goto done;
961*21864bc5SMatthew Dillon 	}
962*21864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
963*21864bc5SMatthew Dillon 		uio->uio_offset = fp->f_offset;
964*21864bc5SMatthew Dillon 
965*21864bc5SMatthew Dillon 	ioflag = 0;
966*21864bc5SMatthew Dillon 	if (flags & O_FBLOCKING) {
967*21864bc5SMatthew Dillon 		/* ioflag &= ~IO_NDELAY; */
968*21864bc5SMatthew Dillon 	} else if (flags & O_FNONBLOCKING) {
969*21864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
970*21864bc5SMatthew Dillon 	} else if (fp->f_flag & FNONBLOCK) {
971*21864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
972*21864bc5SMatthew Dillon 	}
973*21864bc5SMatthew Dillon 	if (flags & O_FBUFFERED) {
974*21864bc5SMatthew Dillon 		/* ioflag &= ~IO_DIRECT; */
975*21864bc5SMatthew Dillon 	} else if (flags & O_FUNBUFFERED) {
976*21864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
977*21864bc5SMatthew Dillon 	} else if (fp->f_flag & O_DIRECT) {
978*21864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
979*21864bc5SMatthew Dillon 	}
980*21864bc5SMatthew Dillon 	ioflag |= sequential_heuristic(uio, fp);
981*21864bc5SMatthew Dillon 
982*21864bc5SMatthew Dillon 	error = dev_dread(dev, uio, ioflag);
983*21864bc5SMatthew Dillon 
984*21864bc5SMatthew Dillon 	release_dev(dev);
985*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
986*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
987*21864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
988*21864bc5SMatthew Dillon 		fp->f_offset = uio->uio_offset;
989*21864bc5SMatthew Dillon 	fp->f_nextoff = uio->uio_offset;
990*21864bc5SMatthew Dillon done:
991*21864bc5SMatthew Dillon 	rel_mplock();
992*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_read finished\n");
993*21864bc5SMatthew Dillon 	return (error);
994*21864bc5SMatthew Dillon }
995*21864bc5SMatthew Dillon 
996*21864bc5SMatthew Dillon 
997*21864bc5SMatthew Dillon static int
998*21864bc5SMatthew Dillon devfs_specf_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
999*21864bc5SMatthew Dillon {
1000*21864bc5SMatthew Dillon 	struct vnode *vp;
1001*21864bc5SMatthew Dillon 	int ioflag;
1002*21864bc5SMatthew Dillon 	int error;
1003*21864bc5SMatthew Dillon 	cdev_t dev;
1004*21864bc5SMatthew Dillon 
1005*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write() called! \n");
1006*21864bc5SMatthew Dillon 	get_mplock();
1007*21864bc5SMatthew Dillon 	KASSERT(uio->uio_td == curthread,
1008*21864bc5SMatthew Dillon 		("uio_td %p is not p %p", uio->uio_td, curthread));
1009*21864bc5SMatthew Dillon 
1010*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
1011*21864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
1012*21864bc5SMatthew Dillon 		error = EBADF;
1013*21864bc5SMatthew Dillon 		goto done;
1014*21864bc5SMatthew Dillon 	}
1015*21864bc5SMatthew Dillon 	if (vp->v_type == VREG)
1016*21864bc5SMatthew Dillon 		bwillwrite(uio->uio_resid);
1017*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
1018*21864bc5SMatthew Dillon 
1019*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
1020*21864bc5SMatthew Dillon 		error = EBADF;
1021*21864bc5SMatthew Dillon 		goto done;
1022*21864bc5SMatthew Dillon 	}
1023*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write() called! for dev %s\n", dev->si_name);
1024*21864bc5SMatthew Dillon 	reference_dev(dev);
1025*21864bc5SMatthew Dillon 
1026*21864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
1027*21864bc5SMatthew Dillon 		uio->uio_offset = fp->f_offset;
1028*21864bc5SMatthew Dillon 
1029*21864bc5SMatthew Dillon 	ioflag = IO_UNIT;
1030*21864bc5SMatthew Dillon 	if (vp->v_type == VREG &&
1031*21864bc5SMatthew Dillon 	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
1032*21864bc5SMatthew Dillon 		ioflag |= IO_APPEND;
1033*21864bc5SMatthew Dillon 	}
1034*21864bc5SMatthew Dillon 
1035*21864bc5SMatthew Dillon 	if (flags & O_FBLOCKING) {
1036*21864bc5SMatthew Dillon 		/* ioflag &= ~IO_NDELAY; */
1037*21864bc5SMatthew Dillon 	} else if (flags & O_FNONBLOCKING) {
1038*21864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
1039*21864bc5SMatthew Dillon 	} else if (fp->f_flag & FNONBLOCK) {
1040*21864bc5SMatthew Dillon 		ioflag |= IO_NDELAY;
1041*21864bc5SMatthew Dillon 	}
1042*21864bc5SMatthew Dillon 	if (flags & O_FBUFFERED) {
1043*21864bc5SMatthew Dillon 		/* ioflag &= ~IO_DIRECT; */
1044*21864bc5SMatthew Dillon 	} else if (flags & O_FUNBUFFERED) {
1045*21864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
1046*21864bc5SMatthew Dillon 	} else if (fp->f_flag & O_DIRECT) {
1047*21864bc5SMatthew Dillon 		ioflag |= IO_DIRECT;
1048*21864bc5SMatthew Dillon 	}
1049*21864bc5SMatthew Dillon 	if (flags & O_FASYNCWRITE) {
1050*21864bc5SMatthew Dillon 		/* ioflag &= ~IO_SYNC; */
1051*21864bc5SMatthew Dillon 	} else if (flags & O_FSYNCWRITE) {
1052*21864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
1053*21864bc5SMatthew Dillon 	} else if (fp->f_flag & O_FSYNC) {
1054*21864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
1055*21864bc5SMatthew Dillon 	}
1056*21864bc5SMatthew Dillon 
1057*21864bc5SMatthew Dillon 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
1058*21864bc5SMatthew Dillon 		ioflag |= IO_SYNC;
1059*21864bc5SMatthew Dillon 	ioflag |= sequential_heuristic(uio, fp);
1060*21864bc5SMatthew Dillon 
1061*21864bc5SMatthew Dillon 	error = dev_dwrite(dev, uio, ioflag);
1062*21864bc5SMatthew Dillon 
1063*21864bc5SMatthew Dillon 	release_dev(dev);
1064*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1065*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->mtime);
1066*21864bc5SMatthew Dillon 
1067*21864bc5SMatthew Dillon 	if ((flags & O_FOFFSET) == 0)
1068*21864bc5SMatthew Dillon 		fp->f_offset = uio->uio_offset;
1069*21864bc5SMatthew Dillon 	fp->f_nextoff = uio->uio_offset;
1070*21864bc5SMatthew Dillon done:
1071*21864bc5SMatthew Dillon 	rel_mplock();
1072*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_write done\n");
1073*21864bc5SMatthew Dillon 	return (error);
1074*21864bc5SMatthew Dillon }
1075*21864bc5SMatthew Dillon 
1076*21864bc5SMatthew Dillon 
1077*21864bc5SMatthew Dillon static int
1078*21864bc5SMatthew Dillon devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
1079*21864bc5SMatthew Dillon {
1080*21864bc5SMatthew Dillon 	struct vnode *vp;
1081*21864bc5SMatthew Dillon 	int error;
1082*21864bc5SMatthew Dillon 
1083*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_stat() called\n");
1084*21864bc5SMatthew Dillon 
1085*21864bc5SMatthew Dillon 	get_mplock();
1086*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
1087*21864bc5SMatthew Dillon 	error = vn_stat(vp, sb, cred);
1088*21864bc5SMatthew Dillon 	if (error) {
1089*21864bc5SMatthew Dillon 		rel_mplock();
1090*21864bc5SMatthew Dillon 		return (error);
1091*21864bc5SMatthew Dillon 	}
1092*21864bc5SMatthew Dillon 
1093*21864bc5SMatthew Dillon 	struct vattr vattr;
1094*21864bc5SMatthew Dillon 	struct vattr *vap;
1095*21864bc5SMatthew Dillon 	u_short mode;
1096*21864bc5SMatthew Dillon 	cdev_t dev;
1097*21864bc5SMatthew Dillon 
1098*21864bc5SMatthew Dillon 	vap = &vattr;
1099*21864bc5SMatthew Dillon 	error = VOP_GETATTR(vp, vap);
1100*21864bc5SMatthew Dillon 	if (error) {
1101*21864bc5SMatthew Dillon 		rel_mplock();
1102*21864bc5SMatthew Dillon 		return (error);
1103*21864bc5SMatthew Dillon 	}
1104*21864bc5SMatthew Dillon 
1105*21864bc5SMatthew Dillon 	/*
1106*21864bc5SMatthew Dillon 	 * Zero the spare stat fields
1107*21864bc5SMatthew Dillon 	 */
1108*21864bc5SMatthew Dillon 	sb->st_lspare = 0;
1109*21864bc5SMatthew Dillon 	sb->st_qspare = 0;
1110*21864bc5SMatthew Dillon 
1111*21864bc5SMatthew Dillon 	/*
1112*21864bc5SMatthew Dillon 	 * Copy from vattr table ... or not in case it's a cloned device
1113*21864bc5SMatthew Dillon 	 */
1114*21864bc5SMatthew Dillon 	if (vap->va_fsid != VNOVAL)
1115*21864bc5SMatthew Dillon 		sb->st_dev = vap->va_fsid;
1116*21864bc5SMatthew Dillon 	else
1117*21864bc5SMatthew Dillon 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
1118*21864bc5SMatthew Dillon 
1119*21864bc5SMatthew Dillon 	sb->st_ino = vap->va_fileid;
1120*21864bc5SMatthew Dillon 
1121*21864bc5SMatthew Dillon 	mode = vap->va_mode;
1122*21864bc5SMatthew Dillon 	mode |= S_IFCHR;
1123*21864bc5SMatthew Dillon 	sb->st_mode = mode;
1124*21864bc5SMatthew Dillon 
1125*21864bc5SMatthew Dillon 	if (vap->va_nlink > (nlink_t)-1)
1126*21864bc5SMatthew Dillon 		sb->st_nlink = (nlink_t)-1;
1127*21864bc5SMatthew Dillon 	else
1128*21864bc5SMatthew Dillon 		sb->st_nlink = vap->va_nlink;
1129*21864bc5SMatthew Dillon 	sb->st_uid = vap->va_uid;
1130*21864bc5SMatthew Dillon 	sb->st_gid = vap->va_gid;
1131*21864bc5SMatthew Dillon 	sb->st_rdev = 0;
1132*21864bc5SMatthew Dillon 	sb->st_size = vap->va_size;
1133*21864bc5SMatthew Dillon 	sb->st_atimespec = vap->va_atime;
1134*21864bc5SMatthew Dillon 	sb->st_mtimespec = vap->va_mtime;
1135*21864bc5SMatthew Dillon 	sb->st_ctimespec = vap->va_ctime;
1136*21864bc5SMatthew Dillon 
1137*21864bc5SMatthew Dillon 	/*
1138*21864bc5SMatthew Dillon 	 * A VCHR and VBLK device may track the last access and last modified
1139*21864bc5SMatthew Dillon 	 * time independantly of the filesystem.  This is particularly true
1140*21864bc5SMatthew Dillon 	 * because device read and write calls may bypass the filesystem.
1141*21864bc5SMatthew Dillon 	 */
1142*21864bc5SMatthew Dillon 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
1143*21864bc5SMatthew Dillon 		dev = vp->v_rdev;
1144*21864bc5SMatthew Dillon 		if (dev != NULL) {
1145*21864bc5SMatthew Dillon 			if (dev->si_lastread) {
1146*21864bc5SMatthew Dillon 				sb->st_atimespec.tv_sec = dev->si_lastread;
1147*21864bc5SMatthew Dillon 				sb->st_atimespec.tv_nsec = 0;
1148*21864bc5SMatthew Dillon 			}
1149*21864bc5SMatthew Dillon 			if (dev->si_lastwrite) {
1150*21864bc5SMatthew Dillon 				sb->st_atimespec.tv_sec = dev->si_lastwrite;
1151*21864bc5SMatthew Dillon 				sb->st_atimespec.tv_nsec = 0;
1152*21864bc5SMatthew Dillon 			}
1153*21864bc5SMatthew Dillon 		}
1154*21864bc5SMatthew Dillon 	}
1155*21864bc5SMatthew Dillon 
1156*21864bc5SMatthew Dillon         /*
1157*21864bc5SMatthew Dillon 	 * According to www.opengroup.org, the meaning of st_blksize is
1158*21864bc5SMatthew Dillon 	 *   "a filesystem-specific preferred I/O block size for this
1159*21864bc5SMatthew Dillon 	 *    object.  In some filesystem types, this may vary from file
1160*21864bc5SMatthew Dillon 	 *    to file"
1161*21864bc5SMatthew Dillon 	 * Default to PAGE_SIZE after much discussion.
1162*21864bc5SMatthew Dillon 	 */
1163*21864bc5SMatthew Dillon 
1164*21864bc5SMatthew Dillon 	sb->st_blksize = PAGE_SIZE;
1165*21864bc5SMatthew Dillon 
1166*21864bc5SMatthew Dillon 	sb->st_flags = vap->va_flags;
1167*21864bc5SMatthew Dillon 
1168*21864bc5SMatthew Dillon 	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
1169*21864bc5SMatthew Dillon 	if (error)
1170*21864bc5SMatthew Dillon 		sb->st_gen = 0;
1171*21864bc5SMatthew Dillon 	else
1172*21864bc5SMatthew Dillon 		sb->st_gen = (u_int32_t)vap->va_gen;
1173*21864bc5SMatthew Dillon 
1174*21864bc5SMatthew Dillon 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
1175*21864bc5SMatthew Dillon 	sb->st_fsmid = vap->va_fsmid;
1176*21864bc5SMatthew Dillon 
1177*21864bc5SMatthew Dillon 	rel_mplock();
1178*21864bc5SMatthew Dillon 	return (0);
1179*21864bc5SMatthew Dillon }
1180*21864bc5SMatthew Dillon 
1181*21864bc5SMatthew Dillon 
1182*21864bc5SMatthew Dillon static int
1183*21864bc5SMatthew Dillon devfs_specf_kqfilter(struct file *fp, struct knote *kn)
1184*21864bc5SMatthew Dillon {
1185*21864bc5SMatthew Dillon 	struct vnode *vp;
1186*21864bc5SMatthew Dillon 	//int ioflag;
1187*21864bc5SMatthew Dillon 	int error;
1188*21864bc5SMatthew Dillon 	cdev_t dev;
1189*21864bc5SMatthew Dillon 
1190*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_kqfilter() called! \n");
1191*21864bc5SMatthew Dillon 
1192*21864bc5SMatthew Dillon 	get_mplock();
1193*21864bc5SMatthew Dillon 
1194*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
1195*21864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
1196*21864bc5SMatthew Dillon 		error = EBADF;
1197*21864bc5SMatthew Dillon 		goto done;
1198*21864bc5SMatthew Dillon 	}
1199*21864bc5SMatthew Dillon 
1200*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
1201*21864bc5SMatthew Dillon 		error = EBADF;
1202*21864bc5SMatthew Dillon 		goto done;
1203*21864bc5SMatthew Dillon 	}
1204*21864bc5SMatthew Dillon 	reference_dev(dev);
1205*21864bc5SMatthew Dillon 
1206*21864bc5SMatthew Dillon 	error = dev_dkqfilter(dev, kn);
1207*21864bc5SMatthew Dillon 
1208*21864bc5SMatthew Dillon 	release_dev(dev);
1209*21864bc5SMatthew Dillon 
1210*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1211*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1212*21864bc5SMatthew Dillon done:
1213*21864bc5SMatthew Dillon 	rel_mplock();
1214*21864bc5SMatthew Dillon 	return (error);
1215*21864bc5SMatthew Dillon }
1216*21864bc5SMatthew Dillon 
1217*21864bc5SMatthew Dillon 
1218*21864bc5SMatthew Dillon static int
1219*21864bc5SMatthew Dillon devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
1220*21864bc5SMatthew Dillon {
1221*21864bc5SMatthew Dillon 	struct vnode *vp;
1222*21864bc5SMatthew Dillon 	//int ioflag;
1223*21864bc5SMatthew Dillon 	int error;
1224*21864bc5SMatthew Dillon 	cdev_t dev;
1225*21864bc5SMatthew Dillon 
1226*21864bc5SMatthew Dillon 	//devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_poll() called! \n");
1227*21864bc5SMatthew Dillon 
1228*21864bc5SMatthew Dillon 	get_mplock();
1229*21864bc5SMatthew Dillon 
1230*21864bc5SMatthew Dillon 	vp = (struct vnode *)fp->f_data;
1231*21864bc5SMatthew Dillon 	if (vp == NULL || vp->v_type == VBAD) {
1232*21864bc5SMatthew Dillon 		error = EBADF;
1233*21864bc5SMatthew Dillon 		goto done;
1234*21864bc5SMatthew Dillon 	}
1235*21864bc5SMatthew Dillon 
1236*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
1237*21864bc5SMatthew Dillon 		error = EBADF;
1238*21864bc5SMatthew Dillon 		goto done;
1239*21864bc5SMatthew Dillon 	}
1240*21864bc5SMatthew Dillon 	reference_dev(dev);
1241*21864bc5SMatthew Dillon 	error = dev_dpoll(dev, events);
1242*21864bc5SMatthew Dillon 
1243*21864bc5SMatthew Dillon 	release_dev(dev);
1244*21864bc5SMatthew Dillon 
1245*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1246*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1247*21864bc5SMatthew Dillon done:
1248*21864bc5SMatthew Dillon 	rel_mplock();
1249*21864bc5SMatthew Dillon 	return (error);
1250*21864bc5SMatthew Dillon }
1251*21864bc5SMatthew Dillon 
1252*21864bc5SMatthew Dillon 
1253*21864bc5SMatthew Dillon /*
1254*21864bc5SMatthew Dillon  * MPALMOSTSAFE - acquires mplock
1255*21864bc5SMatthew Dillon  */
1256*21864bc5SMatthew Dillon static int
1257*21864bc5SMatthew Dillon devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
1258*21864bc5SMatthew Dillon {
1259*21864bc5SMatthew Dillon 	struct vnode *vp = ((struct vnode *)fp->f_data);
1260*21864bc5SMatthew Dillon 	struct vnode *ovp;
1261*21864bc5SMatthew Dillon 	//struct vattr vattr;
1262*21864bc5SMatthew Dillon 	cdev_t	dev;
1263*21864bc5SMatthew Dillon 	int error;
1264*21864bc5SMatthew Dillon 	struct fiodname_args *name_args;
1265*21864bc5SMatthew Dillon 	size_t namlen;
1266*21864bc5SMatthew Dillon 	const char *name;
1267*21864bc5SMatthew Dillon 
1268*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! \n");
1269*21864bc5SMatthew Dillon 
1270*21864bc5SMatthew Dillon 	get_mplock();
1271*21864bc5SMatthew Dillon 
1272*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL) {
1273*21864bc5SMatthew Dillon 		error = EBADF;		/* device was revoked */
1274*21864bc5SMatthew Dillon 		goto out;
1275*21864bc5SMatthew Dillon 	}
1276*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called! for dev %s\n", dev->si_name);
1277*21864bc5SMatthew Dillon 
1278*21864bc5SMatthew Dillon 	if (!(dev_dflags(dev) & D_TTY))
1279*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() called on %s! com is: %x\n", dev->si_name, com);
1280*21864bc5SMatthew Dillon 
1281*21864bc5SMatthew Dillon 	if (com == FIODTYPE) {
1282*21864bc5SMatthew Dillon 		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
1283*21864bc5SMatthew Dillon 		error = 0;
1284*21864bc5SMatthew Dillon 		goto out;
1285*21864bc5SMatthew Dillon 	} else if (com == FIODNAME) {
1286*21864bc5SMatthew Dillon 		name_args = (struct fiodname_args *)data;
1287*21864bc5SMatthew Dillon 		name = dev->si_name;
1288*21864bc5SMatthew Dillon 		namlen = strlen(name) + 1;
1289*21864bc5SMatthew Dillon 
1290*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl, got: FIODNAME for %s\n", name);
1291*21864bc5SMatthew Dillon 
1292*21864bc5SMatthew Dillon 		if (namlen <= name_args->len)
1293*21864bc5SMatthew Dillon 			error = copyout(dev->si_name, name_args->name, namlen);
1294*21864bc5SMatthew Dillon 		else
1295*21864bc5SMatthew Dillon 			error = EINVAL;
1296*21864bc5SMatthew Dillon 
1297*21864bc5SMatthew Dillon 		//name_args->len = namlen; //need _IOWR to enable this
1298*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "ioctl stuff: error: %d\n", error);
1299*21864bc5SMatthew Dillon 		goto out;
1300*21864bc5SMatthew Dillon 	}
1301*21864bc5SMatthew Dillon 	reference_dev(dev);
1302*21864bc5SMatthew Dillon 	error = dev_dioctl(dev, com, data, fp->f_flag, ucred);
1303*21864bc5SMatthew Dillon 	release_dev(dev);
1304*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp)) {
1305*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1306*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->mtime);
1307*21864bc5SMatthew Dillon 	}
1308*21864bc5SMatthew Dillon 
1309*21864bc5SMatthew Dillon 	if (com == TIOCSCTTY)
1310*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: got TIOCSCTTY on %s\n", dev->si_name);
1311*21864bc5SMatthew Dillon 	if (error == 0 && com == TIOCSCTTY) {
1312*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n", dev->si_name);
1313*21864bc5SMatthew Dillon 		struct proc *p = curthread->td_proc;
1314*21864bc5SMatthew Dillon 		struct session *sess;
1315*21864bc5SMatthew Dillon 			if (p == NULL) {
1316*21864bc5SMatthew Dillon 			error = ENOTTY;
1317*21864bc5SMatthew Dillon 			goto out;
1318*21864bc5SMatthew Dillon 		}
1319*21864bc5SMatthew Dillon 		sess = p->p_session;
1320*21864bc5SMatthew Dillon 		/* Do nothing if reassigning same control tty */
1321*21864bc5SMatthew Dillon 		if (sess->s_ttyvp == vp) {
1322*21864bc5SMatthew Dillon 			error = 0;
1323*21864bc5SMatthew Dillon 			goto out;
1324*21864bc5SMatthew Dillon 		}
1325*21864bc5SMatthew Dillon 			/* Get rid of reference to old control tty */
1326*21864bc5SMatthew Dillon 		ovp = sess->s_ttyvp;
1327*21864bc5SMatthew Dillon 		vref(vp);
1328*21864bc5SMatthew Dillon 		sess->s_ttyvp = vp;
1329*21864bc5SMatthew Dillon 		if (ovp)
1330*21864bc5SMatthew Dillon 			vrele(ovp);
1331*21864bc5SMatthew Dillon 	}
1332*21864bc5SMatthew Dillon 
1333*21864bc5SMatthew Dillon out:
1334*21864bc5SMatthew Dillon 	rel_mplock();
1335*21864bc5SMatthew Dillon 	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
1336*21864bc5SMatthew Dillon 	return (error);
1337*21864bc5SMatthew Dillon }
1338*21864bc5SMatthew Dillon 
1339*21864bc5SMatthew Dillon 
1340*21864bc5SMatthew Dillon static int
1341*21864bc5SMatthew Dillon devfs_spec_fsync(struct vop_fsync_args *ap)
1342*21864bc5SMatthew Dillon {
1343*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1344*21864bc5SMatthew Dillon 	int error;
1345*21864bc5SMatthew Dillon 
1346*21864bc5SMatthew Dillon 	if (!vn_isdisk(vp, NULL))
1347*21864bc5SMatthew Dillon 		return (0);
1348*21864bc5SMatthew Dillon 
1349*21864bc5SMatthew Dillon 	/*
1350*21864bc5SMatthew Dillon 	 * Flush all dirty buffers associated with a block device.
1351*21864bc5SMatthew Dillon 	 */
1352*21864bc5SMatthew Dillon 	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
1353*21864bc5SMatthew Dillon 	return (error);
1354*21864bc5SMatthew Dillon }
1355*21864bc5SMatthew Dillon 
1356*21864bc5SMatthew Dillon 
1357*21864bc5SMatthew Dillon 
1358*21864bc5SMatthew Dillon 
1359*21864bc5SMatthew Dillon 
1360*21864bc5SMatthew Dillon 
1361*21864bc5SMatthew Dillon 
1362*21864bc5SMatthew Dillon 
1363*21864bc5SMatthew Dillon 
1364*21864bc5SMatthew Dillon 
1365*21864bc5SMatthew Dillon 
1366*21864bc5SMatthew Dillon 
1367*21864bc5SMatthew Dillon 
1368*21864bc5SMatthew Dillon 
1369*21864bc5SMatthew Dillon 
1370*21864bc5SMatthew Dillon 
1371*21864bc5SMatthew Dillon 
1372*21864bc5SMatthew Dillon 
1373*21864bc5SMatthew Dillon 
1374*21864bc5SMatthew Dillon 
1375*21864bc5SMatthew Dillon static int
1376*21864bc5SMatthew Dillon devfs_spec_read(struct vop_read_args *ap)
1377*21864bc5SMatthew Dillon {
1378*21864bc5SMatthew Dillon 	struct vnode *vp;
1379*21864bc5SMatthew Dillon 	struct uio *uio;
1380*21864bc5SMatthew Dillon 	cdev_t dev;
1381*21864bc5SMatthew Dillon 	int error;
1382*21864bc5SMatthew Dillon 
1383*21864bc5SMatthew Dillon 	vp = ap->a_vp;
1384*21864bc5SMatthew Dillon 	dev = vp->v_rdev;
1385*21864bc5SMatthew Dillon 	uio = ap->a_uio;
1386*21864bc5SMatthew Dillon 
1387*21864bc5SMatthew Dillon 	if (dev == NULL)		/* device was revoked */
1388*21864bc5SMatthew Dillon 		return (EBADF);
1389*21864bc5SMatthew Dillon 	if (uio->uio_resid == 0)
1390*21864bc5SMatthew Dillon 		return (0);
1391*21864bc5SMatthew Dillon 
1392*21864bc5SMatthew Dillon 	vn_unlock(vp);
1393*21864bc5SMatthew Dillon 	error = dev_dread(dev, uio, ap->a_ioflag);
1394*21864bc5SMatthew Dillon 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1395*21864bc5SMatthew Dillon 
1396*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1397*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1398*21864bc5SMatthew Dillon 
1399*21864bc5SMatthew Dillon 	return (error);
1400*21864bc5SMatthew Dillon }
1401*21864bc5SMatthew Dillon 
1402*21864bc5SMatthew Dillon /*
1403*21864bc5SMatthew Dillon  * Vnode op for write
1404*21864bc5SMatthew Dillon  *
1405*21864bc5SMatthew Dillon  * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
1406*21864bc5SMatthew Dillon  *	      struct ucred *a_cred)
1407*21864bc5SMatthew Dillon  */
1408*21864bc5SMatthew Dillon /* ARGSUSED */
1409*21864bc5SMatthew Dillon static int
1410*21864bc5SMatthew Dillon devfs_spec_write(struct vop_write_args *ap)
1411*21864bc5SMatthew Dillon {
1412*21864bc5SMatthew Dillon 	struct vnode *vp;
1413*21864bc5SMatthew Dillon 	struct uio *uio;
1414*21864bc5SMatthew Dillon 	cdev_t dev;
1415*21864bc5SMatthew Dillon 	int error;
1416*21864bc5SMatthew Dillon 
1417*21864bc5SMatthew Dillon 	vp = ap->a_vp;
1418*21864bc5SMatthew Dillon 	dev = vp->v_rdev;
1419*21864bc5SMatthew Dillon 	uio = ap->a_uio;
1420*21864bc5SMatthew Dillon 
1421*21864bc5SMatthew Dillon 	KKASSERT(uio->uio_segflg != UIO_NOCOPY);
1422*21864bc5SMatthew Dillon 
1423*21864bc5SMatthew Dillon 	if (dev == NULL)		/* device was revoked */
1424*21864bc5SMatthew Dillon 		return (EBADF);
1425*21864bc5SMatthew Dillon 
1426*21864bc5SMatthew Dillon 	vn_unlock(vp);
1427*21864bc5SMatthew Dillon 	error = dev_dwrite(dev, uio, ap->a_ioflag);
1428*21864bc5SMatthew Dillon 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1429*21864bc5SMatthew Dillon 
1430*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1431*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->mtime);
1432*21864bc5SMatthew Dillon 
1433*21864bc5SMatthew Dillon 	return (error);
1434*21864bc5SMatthew Dillon }
1435*21864bc5SMatthew Dillon 
1436*21864bc5SMatthew Dillon /*
1437*21864bc5SMatthew Dillon  * Device ioctl operation.
1438*21864bc5SMatthew Dillon  *
1439*21864bc5SMatthew Dillon  * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
1440*21864bc5SMatthew Dillon  *	      int a_fflag, struct ucred *a_cred)
1441*21864bc5SMatthew Dillon  */
1442*21864bc5SMatthew Dillon /* ARGSUSED */
1443*21864bc5SMatthew Dillon static int
1444*21864bc5SMatthew Dillon devfs_spec_ioctl(struct vop_ioctl_args *ap)
1445*21864bc5SMatthew Dillon {
1446*21864bc5SMatthew Dillon 	cdev_t dev;
1447*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1448*21864bc5SMatthew Dillon 
1449*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
1450*21864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1451*21864bc5SMatthew Dillon 	if ( ap->a_command == TIOCSCTTY )
1452*21864bc5SMatthew Dillon 		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_*SPEC*_ioctl: got TIOCSCTTY\n");
1453*21864bc5SMatthew Dillon 
1454*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp)) {
1455*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1456*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->mtime);
1457*21864bc5SMatthew Dillon 	}
1458*21864bc5SMatthew Dillon 
1459*21864bc5SMatthew Dillon 	return (dev_dioctl(dev, ap->a_command, ap->a_data,
1460*21864bc5SMatthew Dillon 		    ap->a_fflag, ap->a_cred));
1461*21864bc5SMatthew Dillon }
1462*21864bc5SMatthew Dillon 
1463*21864bc5SMatthew Dillon /*
1464*21864bc5SMatthew Dillon  * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
1465*21864bc5SMatthew Dillon  */
1466*21864bc5SMatthew Dillon /* ARGSUSED */
1467*21864bc5SMatthew Dillon static int
1468*21864bc5SMatthew Dillon devfs_spec_poll(struct vop_poll_args *ap)
1469*21864bc5SMatthew Dillon {
1470*21864bc5SMatthew Dillon 	cdev_t dev;
1471*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1472*21864bc5SMatthew Dillon 
1473*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
1474*21864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1475*21864bc5SMatthew Dillon 
1476*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1477*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1478*21864bc5SMatthew Dillon 
1479*21864bc5SMatthew Dillon 	return (dev_dpoll(dev, ap->a_events));
1480*21864bc5SMatthew Dillon }
1481*21864bc5SMatthew Dillon 
1482*21864bc5SMatthew Dillon /*
1483*21864bc5SMatthew Dillon  * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
1484*21864bc5SMatthew Dillon  */
1485*21864bc5SMatthew Dillon /* ARGSUSED */
1486*21864bc5SMatthew Dillon static int
1487*21864bc5SMatthew Dillon devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
1488*21864bc5SMatthew Dillon {
1489*21864bc5SMatthew Dillon 	cdev_t dev;
1490*21864bc5SMatthew Dillon 	struct vnode *vp = ap->a_vp;
1491*21864bc5SMatthew Dillon 
1492*21864bc5SMatthew Dillon 	if ((dev = vp->v_rdev) == NULL)
1493*21864bc5SMatthew Dillon 		return (EBADF);		/* device was revoked */
1494*21864bc5SMatthew Dillon 
1495*21864bc5SMatthew Dillon 	if (DEVFS_NODE(vp))
1496*21864bc5SMatthew Dillon 		nanotime(&DEVFS_NODE(vp)->atime);
1497*21864bc5SMatthew Dillon 
1498*21864bc5SMatthew Dillon 	return (dev_dkqfilter(dev, ap->a_kn));
1499*21864bc5SMatthew Dillon }
1500*21864bc5SMatthew Dillon 
1501*21864bc5SMatthew Dillon 
1502*21864bc5SMatthew Dillon 
1503*21864bc5SMatthew Dillon 
1504*21864bc5SMatthew Dillon 
1505*21864bc5SMatthew Dillon 
1506*21864bc5SMatthew Dillon 
1507*21864bc5SMatthew Dillon 
1508*21864bc5SMatthew Dillon 
1509*21864bc5SMatthew Dillon 
1510*21864bc5SMatthew Dillon 
1511*21864bc5SMatthew Dillon 
1512*21864bc5SMatthew Dillon 
1513*21864bc5SMatthew Dillon 
1514*21864bc5SMatthew Dillon 
1515*21864bc5SMatthew Dillon 
1516*21864bc5SMatthew Dillon 
1517*21864bc5SMatthew Dillon 
1518*21864bc5SMatthew Dillon 
1519*21864bc5SMatthew Dillon 
1520*21864bc5SMatthew Dillon 
1521*21864bc5SMatthew Dillon 
1522*21864bc5SMatthew Dillon 
1523*21864bc5SMatthew Dillon 
1524*21864bc5SMatthew Dillon 
1525*21864bc5SMatthew Dillon 
1526*21864bc5SMatthew Dillon 
1527*21864bc5SMatthew Dillon 
1528*21864bc5SMatthew Dillon 
1529*21864bc5SMatthew Dillon 
1530*21864bc5SMatthew Dillon 
1531*21864bc5SMatthew Dillon 
1532*21864bc5SMatthew Dillon 
1533*21864bc5SMatthew Dillon 
1534*21864bc5SMatthew Dillon 
1535*21864bc5SMatthew Dillon 
1536*21864bc5SMatthew Dillon 
1537*21864bc5SMatthew Dillon 
1538*21864bc5SMatthew Dillon 
1539*21864bc5SMatthew Dillon 
1540*21864bc5SMatthew Dillon 
1541*21864bc5SMatthew Dillon 
/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * If the request fits within the device's maximum I/O size it is passed
 * straight through; otherwise the buffer is cloned and issued in chunks,
 * with devfs_spec_strategy_done() chaining the remaining pieces.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	/*
	 * Give attached dependency code (b_dep) a chance to run before
	 * a non-read command is issued.
	 */
	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			//XXX: no idea what has changed here...
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

        /*
         * Device iosize limitations only apply to read and write.  Shortcut
         * the I/O if it fits.
         */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "%s: si_iosize_max not set!\n", dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
        if (bp->b_bcount <= maxiosize ||
            (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
                dev_dstrategy_chain(vp->v_rdev, bio);
                return (0);
        }

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 * The clone shares the original buffer's data; the original bio is
	 * stashed in bio_caller_info1 so the completion routine can find it.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCKINIT(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer.  The chunk size is the largest multiple
	 * of the device's physical block size that fits within maxiosize.
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy chained I/O chunksize=%d\n", chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
		bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	/* Device I/O touches both the access and modification times. */
	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
1647*21864bc5SMatthew Dillon 
/*
 * Chunked up transfer completion routine - chain transfers until done
 *
 * nbio is the bio of the cloned chunk buffer set up by
 * devfs_spec_strategy(); bio_caller_info1.ptr points back at the
 * original bio and bio_caller_info2.index holds the chunk size.
 * On each completion this either terminates the chain (error, short
 * I/O, or all data transferred) or issues the next chunk.
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;		/* progress so far */

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p error %d bcount %d/%d\n",
			bp, bp->b_error, bp->b_bcount,
			bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(1) bcount %d/%d\n",
			bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 * (b_bufsize preserves the size originally requested).
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p short read(2) bcount %d/%d\n",
			bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p finished bcount %d\n",
			bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain: advance the clone's window over the
		 * original data and reissue it for the next chunk.
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG, "spec_strategy: chain %p offset %d/%d bcount %d\n",
			bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}
1735*21864bc5SMatthew Dillon 
1736*21864bc5SMatthew Dillon /*
1737*21864bc5SMatthew Dillon  * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
1738*21864bc5SMatthew Dillon  */
1739*21864bc5SMatthew Dillon static int
1740*21864bc5SMatthew Dillon devfs_spec_freeblks(struct vop_freeblks_args *ap)
1741*21864bc5SMatthew Dillon {
1742*21864bc5SMatthew Dillon 	struct buf *bp;
1743*21864bc5SMatthew Dillon 
1744*21864bc5SMatthew Dillon 	/*
1745*21864bc5SMatthew Dillon 	 * XXX: This assumes that strategy does the deed right away.
1746*21864bc5SMatthew Dillon 	 * XXX: this may not be TRTTD.
1747*21864bc5SMatthew Dillon 	 */
1748*21864bc5SMatthew Dillon 	KKASSERT(ap->a_vp->v_rdev != NULL);
1749*21864bc5SMatthew Dillon 	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
1750*21864bc5SMatthew Dillon 		return (0);
1751*21864bc5SMatthew Dillon 	bp = geteblk(ap->a_length);
1752*21864bc5SMatthew Dillon 	bp->b_cmd = BUF_CMD_FREEBLKS;
1753*21864bc5SMatthew Dillon 	bp->b_bio1.bio_offset = ap->a_offset;
1754*21864bc5SMatthew Dillon 	bp->b_bcount = ap->a_length;
1755*21864bc5SMatthew Dillon 	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
1756*21864bc5SMatthew Dillon 	return (0);
1757*21864bc5SMatthew Dillon }
1758*21864bc5SMatthew Dillon 
1759*21864bc5SMatthew Dillon /*
1760*21864bc5SMatthew Dillon  * Implement degenerate case where the block requested is the block
1761*21864bc5SMatthew Dillon  * returned, and assume that the entire device is contiguous in regards
1762*21864bc5SMatthew Dillon  * to the contiguous block range (runp and runb).
1763*21864bc5SMatthew Dillon  *
1764*21864bc5SMatthew Dillon  * spec_bmap(struct vnode *a_vp, off_t a_loffset,
1765*21864bc5SMatthew Dillon  *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
1766*21864bc5SMatthew Dillon  */
1767*21864bc5SMatthew Dillon static int
1768*21864bc5SMatthew Dillon devfs_spec_bmap(struct vop_bmap_args *ap)
1769*21864bc5SMatthew Dillon {
1770*21864bc5SMatthew Dillon 	if (ap->a_doffsetp != NULL)
1771*21864bc5SMatthew Dillon 		*ap->a_doffsetp = ap->a_loffset;
1772*21864bc5SMatthew Dillon 	if (ap->a_runp != NULL)
1773*21864bc5SMatthew Dillon 		*ap->a_runp = MAXBSIZE;
1774*21864bc5SMatthew Dillon 	if (ap->a_runb != NULL) {
1775*21864bc5SMatthew Dillon 		if (ap->a_loffset < MAXBSIZE)
1776*21864bc5SMatthew Dillon 			*ap->a_runb = (int)ap->a_loffset;
1777*21864bc5SMatthew Dillon 		else
1778*21864bc5SMatthew Dillon 			*ap->a_runb = MAXBSIZE;
1779*21864bc5SMatthew Dillon 	}
1780*21864bc5SMatthew Dillon 	return (0);
1781*21864bc5SMatthew Dillon }
1782*21864bc5SMatthew Dillon 
1783*21864bc5SMatthew Dillon 
1784*21864bc5SMatthew Dillon /*
1785*21864bc5SMatthew Dillon  * Special device advisory byte-level locks.
1786*21864bc5SMatthew Dillon  *
1787*21864bc5SMatthew Dillon  * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
1788*21864bc5SMatthew Dillon  *		struct flock *a_fl, int a_flags)
1789*21864bc5SMatthew Dillon  */
1790*21864bc5SMatthew Dillon /* ARGSUSED */
1791*21864bc5SMatthew Dillon static int
1792*21864bc5SMatthew Dillon devfs_spec_advlock(struct vop_advlock_args *ap)
1793*21864bc5SMatthew Dillon {
1794*21864bc5SMatthew Dillon 	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
1795*21864bc5SMatthew Dillon }
1796*21864bc5SMatthew Dillon 
1797*21864bc5SMatthew Dillon static void
1798*21864bc5SMatthew Dillon devfs_spec_getpages_iodone(struct bio *bio)
1799*21864bc5SMatthew Dillon {
1800*21864bc5SMatthew Dillon 	bio->bio_buf->b_cmd = BUF_CMD_DONE;
1801*21864bc5SMatthew Dillon 	wakeup(bio->bio_buf);
1802*21864bc5SMatthew Dillon }
1803*21864bc5SMatthew Dillon 
/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 *
 * Returns VM_PAGER_OK when the requested page (a_reqpage) was made valid,
 * VM_PAGER_ERROR otherwise.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	/* Number of pages covered by the request, rounded up. */
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */

	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	/* Round the byte count up to a whole number of device blocks. */
	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	/* Borrow a pbuf; its b_data supplies the KVA for the mapping. */
	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	/*
	 * Account for this transfer in the global running-buffer-space
	 * totals manually, since we bypass the normal buffer cache paths.
	 */
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
		++runningbufcount;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	/* Wait for the iodone callback to flip b_cmd and wake us. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	/*
	 * Walk the page array and set the validity of each page based on
	 * how much of it was actually read, releasing or freeing every
	 * page except the requested one (a_reqpage).
	 */
	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= nread) {
			/* Page fully covered by the read. */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_validclean()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_validclean(m, 0, nread - toff);
		} else {
			/* Page entirely beyond what was read. */
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
			    vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		/* The requested page never became valid: report failure. */
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
		    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
		    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	return VM_PAGER_OK;
}
1977*21864bc5SMatthew Dillon 
1978*21864bc5SMatthew Dillon 
1979*21864bc5SMatthew Dillon 
1980*21864bc5SMatthew Dillon 
1981*21864bc5SMatthew Dillon 
1982*21864bc5SMatthew Dillon 
1983*21864bc5SMatthew Dillon 
1984*21864bc5SMatthew Dillon 
1985*21864bc5SMatthew Dillon 
1986*21864bc5SMatthew Dillon 
1987*21864bc5SMatthew Dillon 
1988*21864bc5SMatthew Dillon 
1989*21864bc5SMatthew Dillon 
1990*21864bc5SMatthew Dillon 
1991*21864bc5SMatthew Dillon 
1992*21864bc5SMatthew Dillon 
1993*21864bc5SMatthew Dillon 
1994*21864bc5SMatthew Dillon 
1995*21864bc5SMatthew Dillon 
1996*21864bc5SMatthew Dillon 
1997*21864bc5SMatthew Dillon 
1998*21864bc5SMatthew Dillon 
1999*21864bc5SMatthew Dillon 
2000*21864bc5SMatthew Dillon 
2001*21864bc5SMatthew Dillon 
2002*21864bc5SMatthew Dillon 
2003*21864bc5SMatthew Dillon 
2004*21864bc5SMatthew Dillon 
2005*21864bc5SMatthew Dillon 
2006*21864bc5SMatthew Dillon 
2007*21864bc5SMatthew Dillon 
2008*21864bc5SMatthew Dillon 
2009*21864bc5SMatthew Dillon 
2010*21864bc5SMatthew Dillon 
2011*21864bc5SMatthew Dillon 
2012*21864bc5SMatthew Dillon 
2013*21864bc5SMatthew Dillon 
2014*21864bc5SMatthew Dillon 
2015*21864bc5SMatthew Dillon 
2016*21864bc5SMatthew Dillon static __inline
2017*21864bc5SMatthew Dillon int
2018*21864bc5SMatthew Dillon sequential_heuristic(struct uio *uio, struct file *fp)
2019*21864bc5SMatthew Dillon {
2020*21864bc5SMatthew Dillon 	/*
2021*21864bc5SMatthew Dillon 	 * Sequential heuristic - detect sequential operation
2022*21864bc5SMatthew Dillon 	 */
2023*21864bc5SMatthew Dillon 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
2024*21864bc5SMatthew Dillon 	    uio->uio_offset == fp->f_nextoff) {
2025*21864bc5SMatthew Dillon 		int tmpseq = fp->f_seqcount;
2026*21864bc5SMatthew Dillon 		/*
2027*21864bc5SMatthew Dillon 		 * XXX we assume that the filesystem block size is
2028*21864bc5SMatthew Dillon 		 * the default.  Not true, but still gives us a pretty
2029*21864bc5SMatthew Dillon 		 * good indicator of how sequential the read operations
2030*21864bc5SMatthew Dillon 		 * are.
2031*21864bc5SMatthew Dillon 		 */
2032*21864bc5SMatthew Dillon 		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
2033*21864bc5SMatthew Dillon 		if (tmpseq > IO_SEQMAX)
2034*21864bc5SMatthew Dillon 			tmpseq = IO_SEQMAX;
2035*21864bc5SMatthew Dillon 		fp->f_seqcount = tmpseq;
2036*21864bc5SMatthew Dillon 		return(fp->f_seqcount << IO_SEQSHIFT);
2037*21864bc5SMatthew Dillon 	}
2038*21864bc5SMatthew Dillon 
2039*21864bc5SMatthew Dillon 	/*
2040*21864bc5SMatthew Dillon 	 * Not sequential, quick draw-down of seqcount
2041*21864bc5SMatthew Dillon 	 */
2042*21864bc5SMatthew Dillon 	if (fp->f_seqcount > 1)
2043*21864bc5SMatthew Dillon 		fp->f_seqcount = 1;
2044*21864bc5SMatthew Dillon 	else
2045*21864bc5SMatthew Dillon 		fp->f_seqcount = 0;
2046*21864bc5SMatthew Dillon 	return(0);
2047*21864bc5SMatthew Dillon }
2048