/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.37 (Berkeley) 05/30/91
 */

#include "param.h"
#include "proc.h"
#include "systm.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "namei.h"
#include "vnode.h"
#include "specdev.h"
#include "stat.h"
#include "errno.h"
#include "ioctl.h"
#include "file.h"
#include "disklabel.h"

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};
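
/*
 * A sketch of how this table is used, assuming the single-vnodeops
 * vnode interface of this era (the exact macro and member names live
 * in vnode.h and are assumptions here): each VOP_* call simply
 * indirects through the vnode's operations vector, roughly
 *
 *	#define VOP_OPEN(vp, mode, cred, p) \
 *		(*((vp)->v_op->vop_open))((vp), (mode), (cred), (p))
 *
 * so any vnode whose v_op points at spec_vnodeops has every operation
 * routed to the spec_* routines below.
 */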

/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp, p)
	struct vnode *vp;
	struct nameidata *ndp;
	struct proc *p;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file: disallow the open if the file system is
 * mounted with nodev, and disallow opens of block devices that
 * are currently mounted.  Otherwise, call the driver's open routine.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
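		/*
		 * Drop the vnode lock around the driver open: a
		 * character-device open may sleep for a long time (a
		 * tty waiting for carrier, for instance), and holding
		 * the vnode locked across that sleep would stall all
		 * other operations on this vnode.
		 */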
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
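		/*
		 * With hypothetical values: an FFS partition with
		 * 1024-byte fragments and 8 fragments per block yields
		 * bsize = 8 * 1024 = 8192, so transfers use the file
		 * system block size rather than the BLKDEV_IOSIZE
		 * default.
		 */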
		bscale = bsize / DEV_BSIZE;
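		/*
		 * bscale is the number of DEV_BSIZE sectors per block;
		 * the "&~ (bscale - 1)" below relies on bscale being a
		 * power of two to round the offset down to a block
		 * boundary.  For example (hypothetical numbers), with
		 * DEV_BSIZE 512 and bsize 8192: bscale = 16, and offset
		 * 70000 gives bn = 136 & ~15 = 128 and
		 * on = 70000 % 8192 = 4464, so at most
		 * bsize - on = 3728 bytes come from this block.
		 */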
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
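			/*
			 * Sequential-read heuristic: if this block
			 * immediately follows the last one read
			 * (v_lastr + bscale), have breada() start a
			 * read-ahead of the next block as well;
			 * otherwise issue a plain bread().
			 */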
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
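			/*
			 * A write covering a whole block need not read
			 * the old contents first: getblk() just claims
			 * a buffer.  A partial-block write is a
			 * read-modify-write, so bread() fetches the
			 * block before uiomove() overlays part of it.
			 */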
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
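		/*
		 * The check below looks like an in-kernel probe rather
		 * than a real driver ioctl: command 0 with B_TAPE as
		 * the "data" answers 0 if the block device is a tape
		 * (d_flags has B_TAPE set) and 1 otherwise.  Which
		 * callers use this convention is not shown here.
		 */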
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred, p)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
	struct proc *p;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which, p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, we are in the midst of
		 * forcibly closing the device; otherwise close only
		 * on the last reference.
		 */
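		/*
		 * vcount() is assumed to total the use counts of every
		 * vnode aliasing this dev_t, so a value above one means
		 * the device is still open through some other reference.
		 */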
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On the last close of a block device (that isn't
		 * mounted) we must invalidate any in-core blocks, so
		 * that we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it is
		 * still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the sum
		 * of the reference counts on all the aliased vnodes
		 * falls to one, this is the last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
		minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
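
/*
 * The operations table at the top also names routines (spec_create,
 * spec_mknod, spec_access, and others) that are not defined in this
 * file; presumably they are aliased in specdev.h to spec_badop,
 * spec_ebadf, or other trivial routines such as the two above.
 */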
493