xref: /original-bsd/sys/miscfs/specfs/spec_vnops.c (revision 95a66346)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)spec_vnops.c	7.32 (Berkeley) 03/24/91
8  */
9 
10 #include "param.h"
11 #include "proc.h"
12 #include "systm.h"
13 #include "kernel.h"
14 #include "conf.h"
15 #include "buf.h"
16 #include "mount.h"
17 #include "vnode.h"
18 #include "specdev.h"
19 #include "stat.h"
20 #include "errno.h"
21 #include "ioctl.h"
22 #include "file.h"
23 #include "disklabel.h"
24 
/*
 * Symbolic sleep message strings for devices: used as the wait-channel
 * message so the reason a process sleeps on a device is identifiable.
 * NOTE(review): all uses are outside this file -- confirm at call sites.
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
33 
/*
 * Forward declarations for the special-file vnode operations wired
 * into the spec_vnodeops table below (pre-ANSI declarations, no
 * parameter information).
 */
int	spec_lookup(),
	spec_open(),
	spec_read(),
	spec_write(),
	spec_strategy(),
	spec_bmap(),
	spec_ioctl(),
	spec_select(),
	spec_lock(),
	spec_unlock(),
	spec_close(),
	spec_print(),
	spec_advlock(),
	spec_ebadf(),
	spec_badop();

int	nullop();	/* generic "do nothing, succeed" operation */
51 
/*
 * Vnode operations vector for special files (devices).  The
 * initializer is positional: each entry must stay in the slot order
 * declared by struct vnodeops.  Operations that make no sense on a
 * device node are spec_badop (panic), ones that are harmlessly
 * meaningless return spec_ebadf or nullop.
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_badop,		/* create */
	spec_badop,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_ebadf,		/* access */
	spec_ebadf,		/* getattr */
	spec_ebadf,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_badop,		/* mmap */
	nullop,			/* fsync */
	spec_badop,		/* seek */
	spec_badop,		/* remove */
	spec_badop,		/* link */
	spec_badop,		/* rename */
	spec_badop,		/* mkdir */
	spec_badop,		/* rmdir */
	spec_badop,		/* symlink */
	spec_badop,		/* readdir */
	spec_badop,		/* readlink */
	spec_badop,		/* abortop */
	nullop,			/* inactive */
	nullop,			/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	nullop,			/* islocked */
	spec_advlock,		/* advlock */
};
87 
88 /*
89  * Trivial lookup routine that always fails.
90  */
91 spec_lookup(vp, ndp)
92 	struct vnode *vp;
93 	struct nameidata *ndp;
94 {
95 
96 	ndp->ni_dvp = vp;
97 	ndp->ni_vp = NULL;
98 	return (ENOTDIR);
99 }
100 
/*
 * Open called to allow handler of special files to initialize and
 * validate before actual IO.  Dispatches to the character or block
 * driver's d_open entry; types other than VCHR/VBLK succeed trivially.
 */
/* ARGSUSED */
spec_open(vp, mode, cred)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
{
	struct proc *p = curproc;		/* XXX */
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Device access is forbidden on filesystems mounted "nodev". */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		/* Reject majors with no configured character driver. */
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p));

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * Refuse the open while a filesystem is mounted on this
		 * device (mountedon returns the error to propagate).
		 */
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}
136 
/*
 * Vnode op for read.
 *
 * Character devices are handed straight to the driver's d_read with
 * the vnode unlocked.  Block devices are read through the buffer
 * cache in bsize chunks: the FFS fragment size when the disklabel
 * supplies one, otherwise BLKDEV_IOSIZE.
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = curproc;		/* XXX */
	struct buf *bp;
	daddr_t bn;			/* block number in DEV_BSIZE units */
	long bsize, bscale;		/* transfer size; sectors per transfer */
	struct partinfo dpart;
	register int n, on;		/* bytes this pass; offset within block */
	int error = 0;
	extern int mem_no;		/* major of the mem driver (/dev/kmem) */

	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Unlock across the driver call: a slow device must not
		 * hold the vnode lock for the duration of the I/O. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Prefer the partition's frag*fsize as the transfer unit
		 * when the driver can report an FFS disklabel partition.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			/* Round the offset down to a bsize boundary. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Sequential access (last read ended where this one
			 * starts): issue a one-block read-ahead.
			 */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/* Trim to what the device actually transferred. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/* A fully-consumed block is unlikely to be needed
			 * again soon; age it out of the cache early. */
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
214 
/*
 * Vnode op for write.
 *
 * Character devices go straight to the driver's d_write with the
 * vnode unlocked.  Block devices are written through the buffer
 * cache: a full-block write allocates a buffer without reading
 * (getblk); a partial write is a read-modify-write (bread).
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = curproc;		/* XXX */
	struct buf *bp;
	daddr_t bn;			/* block number in DEV_BSIZE units */
	int bsize, blkmask;		/* transfer size; sector-rounding mask */
	struct partinfo dpart;
	register int n, on;		/* bytes this pass; offset within block */
	int error = 0;
	extern int mem_no;		/* major of the mem driver (/dev/kmem) */

	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/* Unlock across the driver call, as in spec_read. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Prefer the partition's frag*fsize as the transfer unit
		 * when the driver can report an FFS disklabel partition.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Round the offset down to a bsize boundary. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * Whole-block write: no need to read the old
			 * contents first; otherwise read-modify-write.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				/* Completed block: age it and write async. */
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				/* Partial block: delay the write so later
				 * writes to the same block can coalesce. */
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
292 
293 /*
294  * Device ioctl operation.
295  */
296 /* ARGSUSED */
297 spec_ioctl(vp, com, data, fflag, cred)
298 	struct vnode *vp;
299 	int com;
300 	caddr_t data;
301 	int fflag;
302 	struct ucred *cred;
303 {
304 	struct proc *p = curproc;		/* XXX */
305 	dev_t dev = vp->v_rdev;
306 
307 	switch (vp->v_type) {
308 
309 	case VCHR:
310 		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
311 		    fflag, p));
312 
313 	case VBLK:
314 		if (com == 0 && (int)data == B_TAPE)
315 			if (bdevsw[major(dev)].d_flags & B_TAPE)
316 				return (0);
317 			else
318 				return (1);
319 		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
320 		   fflag, p));
321 
322 	default:
323 		panic("spec_ioctl");
324 		/* NOTREACHED */
325 	}
326 }
327 
328 /* ARGSUSED */
329 spec_select(vp, which, fflags, cred)
330 	struct vnode *vp;
331 	int which, fflags;
332 	struct ucred *cred;
333 {
334 	struct proc *p = curproc;		/* XXX */
335 	register dev_t dev;
336 
337 	switch (vp->v_type) {
338 
339 	default:
340 		return (1);		/* XXX */
341 
342 	case VCHR:
343 		dev = vp->v_rdev;
344 		return (*cdevsw[major(dev)].d_select)(dev, which, p);
345 	}
346 }
347 
348 /*
349  * Just call the device strategy routine
350  */
351 spec_strategy(bp)
352 	register struct buf *bp;
353 {
354 
355 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
356 	return (0);
357 }
358 
359 /*
360  * This is a noop, simply returning what one has been given.
361  */
362 spec_bmap(vp, bn, vpp, bnp)
363 	struct vnode *vp;
364 	daddr_t bn;
365 	struct vnode **vpp;
366 	daddr_t *bnp;
367 {
368 
369 	if (vpp != NULL)
370 		*vpp = vp;
371 	if (bnp != NULL)
372 		*bnp = bn;
373 	return (0);
374 }
375 
/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	/* No-op: special vnodes carry no lock state; always succeeds. */
	return (0);
}
386 
/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	/* No-op counterpart to spec_lock; always succeeds. */
	return (0);
}
394 
/*
 * Device close routine.  The driver's d_close is only invoked on the
 * last reference to the device (or when it is being forcibly closed,
 * indicated by VXLOCK); earlier closes succeed without driver action.
 */
/* ARGSUSED */
spec_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	struct proc *p = curproc;		/* XXX */
	dev_t dev = vp->v_rdev;
	int (*cfunc)();		/* driver close entry chosen below */
	int mode;		/* S_IFCHR or S_IFBLK, passed to d_close */

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*cfunc)(dev, flag, mode, p));
}
453 
454 /*
455  * Print out the contents of a special device vnode.
456  */
457 spec_print(vp)
458 	struct vnode *vp;
459 {
460 
461 	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
462 		minor(vp->v_rdev));
463 }
464 
/*
 * Special device advisory byte-level locks.
 * Not supported on devices; always fails with EOPNOTSUPP.
 */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}
478 
/*
 * Special device failed operation: placeholder for vnode operations
 * (access/getattr/setattr) that make no sense on a device node.
 */
spec_ebadf()
{

	return (EBADF);
}
487 
/*
 * Special device bad operation: filler for vnode operations that
 * must never be reached on a device node; reaching one is a kernel
 * bug, hence the panic.
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
497