/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.34 (Berkeley) 04/15/91
 */

#include "param.h"
#include "proc.h"
#include "systm.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "namei.h"
#include "vnode.h"
#include "specdev.h"
#include "stat.h"
#include "errno.h"
#include "ioctl.h"
#include "file.h"
#include "disklabel.h"

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
};
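
/*
 * Generic filesystem code reaches the routines below indirectly,
 * through the VOP_* macros in vnode.h: each vnode of type VCHR or
 * VBLK has its v_op field pointed at the table above.
 */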

/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp, p)
	struct vnode *vp;
	struct nameidata *ndp;
	struct proc *p;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open is called to allow the handler of special files
 * to initialize and validate before actual I/O.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

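	/*
	 * Refuse device access on filesystems mounted with
	 * the "nodev" option.
	 */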
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p));

	case VBLK:
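		/*
		 * Refuse to open a block device that a filesystem
		 * is currently mounted on.
		 */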
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
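		/*
		 * If the driver can report its partition layout
		 * (DIOCGPART) and the partition holds a BSD
		 * filesystem, do I/O in that filesystem's block
		 * size (frag * fsize); otherwise fall back to
		 * BLKDEV_IOSIZE.
		 */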
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
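		/*
		 * Transfer one block per iteration: bn is the
		 * starting device block, rounded down to a bsize
		 * boundary (the mask works because bscale is a
		 * power of two), and on is the offset of this
		 * transfer within the block.  For example, with
		 * bsize 8192 (bscale 16), offset 12288 gives
		 * bn 16 and on 4096.
		 */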
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
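			/*
			 * If this read follows the previous one
			 * sequentially, ask breada() to start an
			 * asynchronous read-ahead of the next block.
			 */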
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
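		/*
		 * Choose the I/O size the same way spec_read() does.
		 */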
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
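		/*
		 * As in spec_read(), round each transfer down to a
		 * bsize boundary and write at most one block per
		 * iteration.
		 */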
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
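			/*
			 * A write that covers the whole block need not
			 * read the old contents first; getblk() just
			 * allocates a buffer for it.
			 */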
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
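			/*
			 * Push completed blocks to the device
			 * asynchronously and age them out of the
			 * cache; leave partial blocks dirty in the
			 * cache (a delayed write) in case they are
			 * extended soon.
			 */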
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
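		/*
		 * A zero command with B_TAPE as the argument is a
		 * special query, answered without calling the
		 * driver: return 0 if the block device is a tape,
		 * 1 if it is not.
		 */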
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred, p)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
	struct proc *p;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which, p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop: the blocks of a special file map directly onto
 * the underlying device, so the block number is returned unchanged.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*cfunc) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device; otherwise we only
		 * close on the last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in-core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*cfunc)(dev, flag, mode, p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
		minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}