/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spec_vnops.c	7.45 (Berkeley) 06/04/92
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

int (**spec_vnodeop_p)();
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },	/* lookup */
	{ &vop_create_desc, spec_create },	/* create */
	{ &vop_mknod_desc, spec_mknod },	/* mknod */
	{ &vop_open_desc, spec_open },		/* open */
	{ &vop_close_desc, spec_close },	/* close */
	{ &vop_access_desc, spec_access },	/* access */
	{ &vop_getattr_desc, spec_getattr },	/* getattr */
	{ &vop_setattr_desc, spec_setattr },	/* setattr */
	{ &vop_read_desc, spec_read },		/* read */
	{ &vop_write_desc, spec_write },	/* write */
	{ &vop_ioctl_desc, spec_ioctl },	/* ioctl */
	{ &vop_select_desc, spec_select },	/* select */
	{ &vop_mmap_desc, spec_mmap },		/* mmap */
	{ &vop_fsync_desc, spec_fsync },	/* fsync */
	{ &vop_seek_desc, spec_seek },		/* seek */
	{ &vop_remove_desc, spec_remove },	/* remove */
	{ &vop_link_desc, spec_link },		/* link */
	{ &vop_rename_desc, spec_rename },	/* rename */
	{ &vop_mkdir_desc, spec_mkdir },	/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },	/* rmdir */
	{ &vop_symlink_desc, spec_symlink },	/* symlink */
	{ &vop_readdir_desc, spec_readdir },	/* readdir */
	{ &vop_readlink_desc, spec_readlink },	/* readlink */
	{ &vop_abortop_desc, spec_abortop },	/* abortop */
	{ &vop_inactive_desc, spec_inactive },	/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },	/* reclaim */
	{ &vop_lock_desc, spec_lock },		/* lock */
	{ &vop_unlock_desc, spec_unlock },	/* unlock */
	{ &vop_bmap_desc, spec_bmap },		/* bmap */
	{ &vop_strategy_desc, spec_strategy },	/* strategy */
	{ &vop_print_desc, spec_print },	/* print */
	{ &vop_islocked_desc, spec_islocked },	/* islocked */
	{ &vop_advlock_desc, spec_advlock },	/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },	/* blkatoff */
	{ &vop_vget_desc, spec_vget },		/* vget */
	{ &vop_valloc_desc, spec_valloc },	/* valloc */
	{ &vop_vfree_desc, spec_vfree },	/* vfree */
	{ &vop_truncate_desc, spec_truncate },	/* truncate */
	{ &vop_update_desc, spec_update },	/* update */
	{ &vop_bwrite_desc, spec_bwrite },	/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
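/*
 * For reference, a sketch of how callers reach the table above
 * (assuming the dispatch code generated from vnode_if.src in this
 * release; the exact expansion may differ): VOP_OPEN() on a special
 * vnode packages its arguments and calls through the vector, roughly
 *
 *	struct vop_open_args a;
 *	a.a_desc = VDESC(vop_open);
 *	a.a_vp = vp; a.a_mode = mode; a.a_cred = cred; a.a_p = p;
 *	error = VCALL(vp, VOFFSET(vop_open), &a);   -- lands in spec_open(&a)
 */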
/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(ap)
	struct vop_lookup_args *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file: don't allow the open if the filesystem is
 * mounted -nodev, and don't allow opens of block devices that are
 * currently mounted.  Otherwise, call the device driver's open routine.
 */
/* ARGSUSED */
spec_open(ap)
	struct vop_open_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	register struct vnode *vp = ap->a_vp;
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/*
		 * The driver's open may sleep (ttys, for instance),
		 * so drop the vnode lock across the call.
		 */
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = ufs_mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
spec_read(ap)
	struct vop_read_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/*
		 * Ask the driver for partition info; if this is an FFS
		 * partition, use the filesystem block size for buffering.
		 */
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * If the read is sequential, start a read-ahead
			 * on the next chunk as well.
			 */
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
				    (int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
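/*
 * A worked example of the block arithmetic above, assuming bsize = 8192
 * and DEV_BSIZE = 512 (so bscale = 16): for uio_offset = 10000,
 *
 *	bn = (10000 / 512) & ~15 = 16	(first device block of the chunk)
 *	on = 10000 % 8192 = 1808	(byte offset within the chunk)
 *	n  = MIN(8192 - 1808, uio_resid)
 *
 * so transfers are split on bsize boundaries and bn is always aligned
 * to the start of a bsize-sized chunk.  spec_write below uses the same
 * scheme with blkmask = bscale - 1.
 */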
/*
 * Vnode op for write
 */
/* ARGSUSED */
spec_write(ap)
	struct vop_write_args *ap;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			/*
			 * A full-chunk write need not read the old
			 * contents first; a partial write must.
			 */
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(ap)
	struct vop_ioctl_args *ap;
{
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command,
		    ap->a_data, ap->a_fflag, ap->a_p));

	case VBLK:
		/*
		 * Command 0 with data B_TAPE is an in-kernel query:
		 * answer 0 if the block device is a tape, 1 if not.
		 */
		if (ap->a_command == 0 && (int)ap->a_data == B_TAPE) {
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command,
		    ap->a_data, ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}
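/*
 * Illustrative use of the B_TAPE convention above -- a sketch, not a
 * routine in this file: a caller holding a block-device vnode could
 * ask whether the device is a tape without consulting bdevsw itself:
 *
 *	if (VOP_IOCTL(vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0)
 *		-- the device is a tape
 */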
/* ARGSUSED */
spec_select(ap)
	struct vop_select_args *ap;
{
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(ap)
	struct vop_strategy_args *ap;
{

	(*bdevsw[major(ap->a_bp->b_dev)].d_strategy)(ap->a_bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(ap)
	struct vop_bmap_args *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(ap)
	struct vop_lock_args *ap;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(ap)
	struct vop_unlock_args *ap;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(ap)
	struct vop_close_args *ap;
{
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in-core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(ap)
	struct vop_print_args *ap;
{

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
spec_advlock(ap)
	struct vop_advlock_args *ap;
{

	return (EOPNOTSUPP);
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}
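/*
 * Note on the table at the top of this file: several of the
 * directory-style entries (spec_create, spec_mknod, spec_remove,
 * spec_link, spec_rename, spec_mkdir, spec_rmdir, spec_symlink, and
 * similar) have no functions here; they are expected to be supplied by
 * the specdev.h header included above as macros resolving to catch-all
 * routines such as spec_badop and spec_ebadf, which is why those two
 * otherwise-unreferenced routines are defined just above.
 */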