/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.42 (Berkeley) 02/04/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_create,		/* create */
	spec_mknod,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_access,		/* access */
	spec_getattr,		/* getattr */
	spec_setattr,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_mmap,		/* mmap */
	spec_fsync,		/* fsync */
	spec_seek,		/* seek */
	spec_remove,		/* remove */
	spec_link,		/* link */
	spec_rename,		/* rename */
	spec_mkdir,		/* mkdir */
	spec_rmdir,		/* rmdir */
	spec_symlink,		/* symlink */
	spec_readdir,		/* readdir */
	spec_readlink,		/* readlink */
	spec_abortop,		/* abortop */
	spec_inactive,		/* inactive */
	spec_reclaim,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_islocked,		/* islocked */
	spec_advlock,		/* advlock */
	spec_blkatoff,		/* blkatoff */
	spec_vget,		/* vget */
	spec_valloc,		/* valloc */
	spec_vfree,		/* vfree */
	spec_truncate,		/* truncate */
	spec_update,		/* update */
	spec_bwrite,		/* bwrite */
};

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{

	*vpp = NULL;
	return (ENOTDIR);
}
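/*
 * Illustrative sketch, not part of the original file: these routines
 * are not called directly.  A special-file vnode carries a pointer to
 * the spec_vnodeops table above, and the kernel's VOP_* macros (such
 * as the VOP_LOCK/VOP_UNLOCK calls used below) dispatch through that
 * pointer, along the lines of the hypothetical
 *
 *	error = (*vp->v_op->vn_open)(vp, mode, cred, p);
 *
 * The member name vn_open here is an assumption for illustration;
 * only the table and its ordering are taken from this file.
 */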
/*
 * Open a special file: Don't allow open if fs is mounted -nodev,
 * and don't allow opens of block devices that are currently mounted.
 * Otherwise, call device driver open function.
 */
/* ARGSUSED */
spec_open(vp, mode, cred, p)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, mode, S_IFCHR, p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = ufs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK, p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
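/*
 * Worked example of the block arithmetic above (illustrative figures,
 * not from the original source): with DEV_BSIZE = 512 and a filesystem
 * block size of 8192 bytes (bscale = 16), a read at byte offset 12288
 * with 2000 bytes of residual count computes
 *
 *	bn = (12288 / 512) & ~(16 - 1) = 24 & ~15 = 16
 *	on = 12288 % 8192              = 4096
 *	n  = MIN(8192 - 4096, 2000)    = 2000
 *
 * so the transfer copies 2000 bytes starting 4096 bytes into the 8K
 * buffer whose first device block is 16.  spec_write below rounds its
 * offsets down the same way via the equivalent blkmask computation.
 */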
/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred, p)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	case VBLK:
		if (com == 0 && (int)data == B_TAPE)
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data,
		    fflag, p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred, p)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
	struct proc *p;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which, p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred, p)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
	struct proc *p;
{
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, flag, mode, p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
	    minor(vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(vp, id, op, fl, flags)
	struct vnode *vp;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
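/*
 * Note (an assumption, not stated in this file): the directory-style
 * entries in the spec_vnodeops table -- spec_create, spec_mknod,
 * spec_rename, and the like -- have no definitions here, so they are
 * presumably macros in a header such as specdev.h that resolve to the
 * spec_ebadf/spec_badop catch-alls above, along the lines of
 *
 *	#define spec_create ((int (*)()) spec_badop)
 *
 * which leaves only the operations meaningful for devices with real
 * implementations in this file.
 */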