xref: /original-bsd/sys/miscfs/specfs/spec_vnops.c (revision 10042f30)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)spec_vnops.c	7.44 (Berkeley) 05/15/92
8  */
9 
10 #include <sys/param.h>
11 #include <sys/proc.h>
12 #include <sys/systm.h>
13 #include <sys/kernel.h>
14 #include <sys/conf.h>
15 #include <sys/buf.h>
16 #include <sys/mount.h>
17 #include <sys/namei.h>
18 #include <sys/vnode.h>
19 #include <sys/specdev.h>
20 #include <sys/stat.h>
21 #include <sys/errno.h>
22 #include <sys/ioctl.h>
23 #include <sys/file.h>
24 #include <sys/disklabel.h>
25 
/*
 * Symbolic sleep message strings for devices: exported for drivers to
 * use as wait-channel descriptions while sleeping in their open/close/
 * I/O paths.  (NOTE(review): the consumers are driver code elsewhere,
 * not visible in this file.)
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
/*
 * Vnode operations vector for special (device) files.  Each table entry
 * pairs an operation descriptor with the spec_* implementation below.
 * NOTE(review): spec_vnodeop_p is presumably filled in from this table
 * by vfs initialization code elsewhere — confirm against vfs_init.
 */
int (**spec_vnodeop_p)();
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },		/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },		/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_select_desc, spec_select },		/* select */
	{ &vop_mmap_desc, spec_mmap },		/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },		/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },		/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },		/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },		/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_vget_desc, spec_vget },		/* vget */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
82 
/*
 * Trivial lookup routine that always fails: a special file is never a
 * directory, so there is nothing to look up in.
 */
int
spec_lookup (ap)
	struct vop_lookup_args *ap;
{

	/* No result vnode; callers see ENOTDIR. */
	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
94 
/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open (ap)
	struct vop_open_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	dev_t dev = (dev_t)ap->a_vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Refuse device access on filesystems mounted with MNT_NODEV. */
	if (ap->a_vp->v_mount && (ap->a_vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (ap->a_vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/*
		 * Drop the vnode lock while the driver open runs (it may
		 * sleep), then re-acquire it before returning.
		 */
		VOP_UNLOCK(ap->a_vp);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		VOP_LOCK(ap->a_vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/* Block devices with a filesystem mounted may not be opened. */
		if (error = ufs_mountedon(ap->a_vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	}
	/* Any other vnode type: nothing to open, succeed trivially. */
	return (0);
}
132 
/*
 * Vnode op for read.  Character devices are handed straight to the
 * driver; block devices are read through the buffer cache in
 * filesystem-block-sized chunks with one block of read-ahead on
 * sequential access.
 */
/* ARGSUSED */
spec_read (ap)
	struct vop_read_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	struct proc *p = ap->a_uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (ap->a_uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (ap->a_uio->uio_segflg == UIO_USERSPACE && ap->a_uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (ap->a_uio->uio_resid == 0)
		return (0);

	switch (ap->a_vp->v_type) {

	case VCHR:
		/*
		 * Drop the vnode lock across the driver read (it may
		 * sleep), then re-acquire it.
		 */
		VOP_UNLOCK(ap->a_vp);
		error = (*cdevsw[major(ap->a_vp->v_rdev)].d_read)
			(ap->a_vp->v_rdev, ap->a_uio, ap->a_ioflag);
		VOP_LOCK(ap->a_vp);
		return (error);

	case VBLK:
		if (ap->a_uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Transfer size: BLKDEV_IOSIZE by default; if the driver
		 * reports a BSD filesystem partition via DIOCGPART, use
		 * its filesystem block size (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(ap->a_vp->v_rdev)].d_ioctl)(ap->a_vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			/* bsize-aligned device block and offset within it. */
			bn = (ap->a_uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = ap->a_uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), ap->a_uio->uio_resid);
			/*
			 * If this read follows directly on the previous one
			 * (v_lastr), also start a read-ahead of the next block.
			 */
			if (ap->a_vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(ap->a_vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(ap->a_vp, bn, (int)bsize, NOCRED, &bp);
			ap->a_vp->v_lastr = bn;
			/* Clip to what actually made it into the buffer. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, ap->a_uio);
			/* Fully consumed buffers are aged for early reuse. */
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && ap->a_uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
208 
/*
 * Vnode op for write.  Character devices go straight to the driver;
 * block devices are written through the buffer cache, reading a block
 * in first (read-modify-write) when only part of it is being written.
 */
/* ARGSUSED */
spec_write (ap)
	struct vop_write_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	struct proc *p = ap->a_uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (ap->a_uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (ap->a_uio->uio_segflg == UIO_USERSPACE && ap->a_uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (ap->a_vp->v_type) {

	case VCHR:
		/*
		 * Drop the vnode lock across the driver write (it may
		 * sleep), then re-acquire it.
		 */
		VOP_UNLOCK(ap->a_vp);
		error = (*cdevsw[major(ap->a_vp->v_rdev)].d_write)
			(ap->a_vp->v_rdev, ap->a_uio, ap->a_ioflag);
		VOP_LOCK(ap->a_vp);
		return (error);

	case VBLK:
		if (ap->a_uio->uio_resid == 0)
			return (0);
		if (ap->a_uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Transfer size: BLKDEV_IOSIZE by default; if the driver
		 * reports a BSD filesystem partition via DIOCGPART, use
		 * its filesystem block size (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(ap->a_vp->v_rdev)].d_ioctl)(ap->a_vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* bsize-aligned device block and offset within it. */
			bn = (ap->a_uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = ap->a_uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), ap->a_uio->uio_resid);
			/*
			 * A full-block write needs no prior read; a partial
			 * write must read the block in first.
			 */
			if (n == bsize)
				bp = getblk(ap->a_vp, bn, bsize);
			else
				error = bread(ap->a_vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, ap->a_uio);
			/*
			 * Completed blocks are aged and pushed out
			 * asynchronously; partial blocks are delayed-written.
			 */
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && ap->a_uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
283 
284 /*
285  * Device ioctl operation.
286  */
287 /* ARGSUSED */
288 spec_ioctl (ap)
289 	struct vop_ioctl_args *ap;
290 {
291 	dev_t dev = ap->a_vp->v_rdev;
292 
293 	switch (ap->a_vp->v_type) {
294 
295 	case VCHR:
296 		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
297 		    ap->a_fflag, ap->a_p));
298 
299 	case VBLK:
300 		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE)
301 			if (bdevsw[major(dev)].d_flags & B_TAPE)
302 				return (0);
303 			else
304 				return (1);
305 		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
306 		   ap->a_fflag, ap->a_p));
307 
308 	default:
309 		panic("spec_ioctl");
310 		/* NOTREACHED */
311 	}
312 }
313 
314 /* ARGSUSED */
315 spec_select (ap)
316 	struct vop_select_args *ap;
317 {
318 	register dev_t dev;
319 
320 	switch (ap->a_vp->v_type) {
321 
322 	default:
323 		return (1);		/* XXX */
324 
325 	case VCHR:
326 		dev = ap->a_vp->v_rdev;
327 		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p);
328 	}
329 }
330 
331 /*
332  * Just call the device strategy routine
333  */
334 spec_strategy (ap)
335 	struct vop_strategy_args *ap;
336 {
337 
338 	(*bdevsw[major(ap->a_bp->b_dev)].d_strategy)(ap->a_bp);
339 	return (0);
340 }
341 
342 /*
343  * This is a noop, simply returning what one has been given.
344  */
345 spec_bmap (ap)
346 	struct vop_bmap_args *ap;
347 {
348 
349 	if (ap->a_vpp != NULL)
350 		*ap->a_vpp = ap->a_vp;
351 	if (ap->a_bnp != NULL)
352 		*ap->a_bnp = ap->a_bn;
353 	return (0);
354 }
355 
/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock (ap)
	struct vop_lock_args *ap;
{

	/* No-op; always succeeds. */
	return (0);
}
366 
/* Unlock counterpart of spec_lock(); also a no-op that always succeeds. */
/* ARGSUSED */
spec_unlock (ap)
	struct vop_unlock_args *ap;
{

	return (0);
}
374 
/*
 * Device close routine.  Selects the driver close entry and file mode
 * by vnode type, closes only on last reference (or on forcible close
 * when the vnode is VXLOCKed), then calls the driver.
 */
/* ARGSUSED */
spec_close (ap)
	struct vop_close_args *ap;
{
	dev_t dev = ap->a_vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (ap->a_vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(ap->a_vp) > 1 && (ap->a_vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(ap->a_vp, 0);
		if (vinvalbuf(ap->a_vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(ap->a_vp) > 1 && (ap->a_vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* Invoke the driver close entry selected above. */
	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
430 
431 /*
432  * Print out the contents of a special device vnode.
433  */
434 spec_print (ap)
435 	struct vop_print_args *ap;
436 {
437 
438 	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
439 		minor(ap->a_vp->v_rdev));
440 }
441 
/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock (ap)
	struct vop_advlock_args *ap;
{

	/* Advisory record locking is not supported on special devices. */
	return (EOPNOTSUPP);
}
452 
/*
 * Special device failed operation: stub that always returns EBADF.
 * NOTE(review): presumably wired (elsewhere) into vnode ops that are
 * invalid on special files — confirm against sys/specdev.h.
 */
spec_ebadf()
{

	return (EBADF);
}
461 
/*
 * Special device bad operation: an operation that must never be
 * reachable on a special vnode; getting here is a kernel bug.
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
471