xref: /original-bsd/sys/miscfs/specfs/spec_vnops.c (revision dd262573)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)spec_vnops.c	7.29 (Berkeley) 12/05/90
8  */
9 
10 #include "param.h"
11 #include "systm.h"
12 #include "user.h"
13 #include "kernel.h"
14 #include "conf.h"
15 #include "buf.h"
16 #include "mount.h"
17 #include "vnode.h"
18 #include "specdev.h"
19 #include "stat.h"
20 #include "errno.h"
21 #include "ioctl.h"
22 #include "file.h"
23 #include "disklabel.h"
24 
/*
 * Symbolic sleep message strings for devices.
 * NOTE(review): defined here for use by device drivers elsewhere;
 * none of these strings are referenced within this file.
 */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
33 
/*
 * Forward declarations for the operations installed in the
 * spec_vnodeops vector below.
 */
int	spec_lookup(),
	spec_open(),
	spec_read(),
	spec_write(),
	spec_strategy(),
	spec_bmap(),
	spec_ioctl(),
	spec_select(),
	spec_lock(),
	spec_unlock(),
	spec_close(),
	spec_print(),
	spec_ebadf(),
	spec_badop(),
	spec_nullop();
49 
/*
 * Vnode operations vector for special (device) files.
 * Operations that are meaningless on a device panic (spec_badop),
 * operations that must be satisfied by an overlying filesystem
 * fail with EBADF (spec_ebadf), and harmless operations succeed
 * as no-ops (spec_nullop).  Entry order must match struct vnodeops.
 */
struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_badop,		/* create */
	spec_badop,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_ebadf,		/* access */
	spec_ebadf,		/* getattr */
	spec_ebadf,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_badop,		/* mmap */
	spec_nullop,		/* fsync */
	spec_badop,		/* seek */
	spec_badop,		/* remove */
	spec_badop,		/* link */
	spec_badop,		/* rename */
	spec_badop,		/* mkdir */
	spec_badop,		/* rmdir */
	spec_badop,		/* symlink */
	spec_badop,		/* readdir */
	spec_badop,		/* readlink */
	spec_badop,		/* abortop */
	spec_nullop,		/* inactive */
	spec_nullop,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_nullop,		/* islocked */
};
84 
85 /*
86  * Trivial lookup routine that always fails.
87  */
88 spec_lookup(vp, ndp)
89 	struct vnode *vp;
90 	struct nameidata *ndp;
91 {
92 
93 	ndp->ni_dvp = vp;
94 	ndp->ni_vp = NULL;
95 	return (ENOTDIR);
96 }
97 
/*
 * Open called to allow handler of special files to initialize and
 * validate before actual IO.
 *
 * Dispatches on the vnode type to the d_open entry of the character
 * or block device switch for the device's major number.  Returns
 * ENXIO for a major number beyond the configured switch table, or
 * when the underlying mount disallows device access (MNT_NODEV).
 */
/* ARGSUSED */
spec_open(vp, mode, cred)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/* Refuse device access on filesystems mounted "nodev". */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR));

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * NOTE(review): mountedon() presumably rejects a block
		 * device that currently backs a mounted filesystem.
		 */
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK));
	}
	/* Any other vnode type opens as a silent no-op. */
	return (0);
}
132 
/*
 * Vnode op for read.
 *
 * Character devices are passed straight to the driver's d_read
 * entry with the vnode unlocked.  Block devices are read through
 * the buffer cache a block at a time, with one-block read-ahead
 * when access is sequential.
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;		/* device block number of this transfer */
	long bsize, bscale;	/* block size in bytes / in DEV_BSIZE units */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass / offset within block */
	int error = 0;
	extern int mem_no;	/* major device number of memory special file */

	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/*
		 * Unlock across the driver call — presumably so a
		 * device read that sleeps does not hold the vnode lock.
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Transfer size: default BLKDEV_IOSIZE, but if the driver
		 * reports a disklabel partition holding a BSD filesystem,
		 * use that filesystem's block size (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * If this block directly follows the last one read
			 * (v_lastr), assume sequential access and start
			 * read-ahead on the next block.
			 */
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			/* Trim to what the device actually transferred. */
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			/*
			 * A fully consumed block is unlikely to be wanted
			 * again soon; age it for faster buffer reuse.
			 */
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
209 
/*
 * Vnode op for write.
 *
 * Character devices are passed straight to the driver's d_write
 * entry with the vnode unlocked.  Block devices are written through
 * the buffer cache: full-block writes allocate a buffer without
 * reading (getblk), partial writes read-modify-write (bread).
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;		/* device block number of this transfer */
	int bsize, blkmask;	/* block size in bytes / alignment mask */
	struct partinfo dpart;
	register int n, on;	/* bytes this pass / offset within block */
	int error = 0;
	extern int mem_no;	/* major device number of memory special file */

	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
		/*
		 * Unlock across the driver call — presumably so a
		 * device write that sleeps does not hold the vnode lock.
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		/*
		 * Transfer size: default BLKDEV_IOSIZE, but if the driver
		 * reports a disklabel partition holding a BSD filesystem,
		 * use that filesystem's block size (frag * fsize) instead.
		 */
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			/* Round the offset down to a bsize-aligned block. */
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * A full-block write need not read the old
			 * contents; a partial write must.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				/*
				 * Completed block: age it and write it
				 * asynchronously; partial blocks are
				 * delayed-written in case more follows.
				 */
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
286 
/*
 * Device ioctl operation.
 *
 * Forwards the command to the character or block driver's d_ioctl
 * entry.  For block devices, the special query (com == 0, data ==
 * B_TAPE) answers whether the device is a tape: 0 when the bdevsw
 * entry has the B_TAPE flag set, 1 otherwise, without calling the
 * driver.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	case VBLK:
		/* In-kernel "is this a tape?" query; see header comment. */
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
318 
319 /* ARGSUSED */
320 spec_select(vp, which, fflags, cred)
321 	struct vnode *vp;
322 	int which, fflags;
323 	struct ucred *cred;
324 {
325 	register dev_t dev;
326 
327 	switch (vp->v_type) {
328 
329 	default:
330 		return (1);		/* XXX */
331 
332 	case VCHR:
333 		dev = vp->v_rdev;
334 		return (*cdevsw[major(dev)].d_select)(dev, which);
335 	}
336 }
337 
338 /*
339  * Just call the device strategy routine
340  */
341 spec_strategy(bp)
342 	register struct buf *bp;
343 {
344 
345 	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
346 	return (0);
347 }
348 
349 /*
350  * This is a noop, simply returning what one has been given.
351  */
352 spec_bmap(vp, bn, vpp, bnp)
353 	struct vnode *vp;
354 	daddr_t bn;
355 	struct vnode **vpp;
356 	daddr_t *bnp;
357 {
358 
359 	if (vpp != NULL)
360 		*vpp = vp;
361 	if (bnp != NULL)
362 		*bnp = bn;
363 	return (0);
364 }
365 
/*
 * At the moment we do not do any locking.
 * Lock on a device vnode is a successful no-op.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}
376 
/* Unlock on a device vnode is a successful no-op; see spec_lock. */
/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}
384 
/*
 * Device close routine.
 *
 * Calls the driver's d_close only on the last reference to the
 * device (across all aliased vnodes), or unconditionally when the
 * vnode is being forcibly revoked (VXLOCK).  Block devices also
 * have their cached buffers flushed and invalidated first.
 */
/* ARGSUSED */
spec_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;
	int (*cfunc)();		/* driver close entry to invoke */
	int mode;		/* S_IFCHR or S_IFBLK, passed to driver */

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		/* NOTE(review): a nonzero vinvalbuf skips the driver close. */
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*cfunc)(dev, flag, mode));
}
442 
443 /*
444  * Print out the contents of a special device vnode.
445  */
446 spec_print(vp)
447 	struct vnode *vp;
448 {
449 
450 	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
451 		minor(vp->v_rdev));
452 }
453 
/*
 * Special device failed operation: used for vnode operations that
 * an overlying filesystem must provide; always returns EBADF.
 */
spec_ebadf()
{

	return (EBADF);
}
462 
/*
 * Special device bad operation: used for vnode operations that are
 * meaningless on a device; reaching one is a kernel bug, so panic.
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
472 
/*
 * Special device null operation: used for vnode operations that
 * are harmless no-ops on a device; always succeeds.
 */
spec_nullop()
{

	return (0);
}
481