/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.81 (Berkeley) 07/23/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/socket.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

int lfs_mountfs __P((struct vnode *, struct mount *, struct proc *));

/*
 * VFS operations vector for the log-structured file system.  Several
 * entries (start, quotactl) are shared with the generic UFS code.
 */
struct vfsops lfs_vfsops = {
	lfs_mount,
	ufs_start,
	lfs_unmount,
	lfs_root,
	ufs_quotactl,
	lfs_statfs,
	lfs_sync,
	lfs_vget,
	lfs_fhtovp,
	lfs_vptofh,
	lfs_init,
};

/*
 * Mounting an LFS as the root file system is not yet supported;
 * any attempt panics the system.
 */
int
lfs_mountroot()
{
	panic("lfs_mountroot");		/* XXX -- implement */
}

/*
 * VFS Operations.
 *
 * mount system call
 *
 * Copies in the user's ufs_args, handles the MNT_UPDATE cases
 * (read-only -> read/write upgrade, export-list maintenance), looks up
 * and validates the block device, and for a fresh mount hands off to
 * lfs_mountfs().  On success the mount-point and device names are
 * copied into both the superblock and the mount statistics.
 */
lfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct lfs *fs;				/* LFS */
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);

	/* Until LFS can do NFS right. XXX */
	if (args.exflags & MNT_EXPORTED)
		return (EINVAL);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
#ifdef NOTLFS							/* LFS */
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
#else
		fs = ump->um_lfs;
		if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->lfs_ronly = 0;
#endif
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 * NOTE(review): MNT_EXPORTED was rejected above,
			 * so the export branch here looks unreachable --
			 * presumably kept for when LFS supports NFS.
			 */
			if (args.exflags & MNT_EXPORTED) {
				if (error = ufs_hang_addrlist(mp, &args))
					return (error);
				mp->mnt_flag |= MNT_EXPORTED;
			}
			if (args.exflags & MNT_DELEXPORT) {
				ufs_free_addrlist(ump);
				mp->mnt_flag &=
				    ~(MNT_EXPORTED | MNT_DEFEXPORTED);
			}
			return (0);
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;
	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = lfs_mountfs(devvp, mp, p);		/* LFS */
	else {
		/*
		 * Updating with a device name: it must be the same
		 * device the file system was originally mounted from.
		 * (ump was set in the MNT_UPDATE branch above.)
		 */
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;					/* LFS */
	/*
	 * Record the mounted-on and mounted-from names, NUL-padding the
	 * remainder of each buffer, and refresh the cached statistics.
	 */
#ifdef NOTLFS							/* LFS */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
#else
	(void)copyinstr(path, fs->lfs_fsmnt, sizeof(fs->lfs_fsmnt) - 1, &size);
	bzero(fs->lfs_fsmnt + size, sizeof(fs->lfs_fsmnt) - size);
	bcopy((caddr_t)fs->lfs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) lfs_statfs(mp, &mp->mnt_stat, p);
#endif
	return (0);
}

/*
 * Common code for mount and mountroot
 * LFS specific
 *
 * Opens the device, reads and validates the on-disk superblock, builds
 * the in-core ufsmount/lfs structures, and caches a reference to the
 * ifile vnode in the in-core superblock.  On failure the buffer is
 * released and the device closed; the caller vrele's devvp.
 */
int
lfs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	extern struct vnode *rootvp;
	register struct lfs *fs;
	register struct ufsmount *ump;
	struct vnode *vp;
	struct buf *bp;
	struct partinfo dpart;
	dev_t dev;
	int error, i, ronly, size;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = ufs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, 1, p->p_ucred, p))
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);

	/*
	 * Determine the device sector size from the disk label; fall
	 * back to DEV_BSIZE if the label cannot be read.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		size = dpart.disklab->d_secsize;
#ifdef NEVER_USED
		/*
		 * NOTE(review): dead code -- and `fs' is not yet
		 * initialized at this point, so enabling this block
		 * as-is would dereference an uninitialized pointer.
		 */
		dpart.part->p_fstype = FS_LFS;
		dpart.part->p_fsize = fs->lfs_fsize;	/* frag size */
		dpart.part->p_frag = fs->lfs_frag;	/* frags per block */
		dpart.part->p_cpg = fs->lfs_segshift;	/* segment shift */
#endif
	}

	/* Don't free random space on error. */
	bp = NULL;
	ump = NULL;

	/* Read in the superblock (it sits past the label pad area). */
	if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_lfs;

	/* Check the basics. */
	if (fs->lfs_magic != LFS_MAGIC || fs->lfs_bsize > MAXBSIZE ||
	    fs->lfs_bsize < sizeof(struct lfs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
#ifdef DEBUG
	lfs_dump_super(fs);
#endif

	/* Allocate the mount structure, copy the superblock into it. */
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	fs = ump->um_lfs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK);
	bcopy(bp->b_un.b_addr, fs, sizeof(struct lfs));
	if (sizeof(struct lfs) < LFS_SBPAD)			/* XXX why? */
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* Set up the I/O information */
	fs->lfs_iocount = 0;

	/* Set up the ifile and lock aflags */
	fs->lfs_doifile = 0;
	fs->lfs_writer = 0;
	fs->lfs_dirops = 0;
	fs->lfs_seglock = 0;

	/* Set the file system readonly/modify bits. */
	fs->lfs_ronly = ronly;
	if (ronly == 0)
		fs->lfs_fmod = 1;

	/* Initialize the mount structure. */
	dev = devvp->v_rdev;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/*
	 * We use the ifile vnode for almost every operation.  Instead of
	 * retrieving it from the hash table each time we retrieve it here,
	 * artificially increment the reference count and keep a pointer
	 * to it in the incore copy of the superblock.
	 *
	 * NOTE(review): if VFS_VGET fails here, the `out' path frees ump
	 * and clears mnt_data, but SI_MOUNTEDON and MNT_LOCAL (set just
	 * above) are left set -- looks like a cleanup gap; confirm
	 * against later revisions before relying on this path.
	 */
	if (error = VFS_VGET(mp, LFS_IFILE_INUM, &vp))
		goto out;
	fs->lfs_ivnode = vp;
	VREF(vp);
	vput(vp);

	return (0);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_lfs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * unmount system call
 *
 * Flushes all vnodes (honoring MNT_FORCE when permitted), checkpoints
 * the file system via VFS_SYNC, tears down the ifile vnode, closes the
 * device and frees the in-core structures.
 */
lfs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	register struct lfs *fs;
	int i, error, flags, ronly;

#ifdef VERBOSE
	printf("lfs_unmount\n");
#endif
	flags = 0;
	if (mntflags & MNT_FORCE) {
		/* Forced unmount of root, or when disabled, is refused. */
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Drop the extra reference taken on the ifile vnode at mount
	 * time, then flush everything except the ifile itself.
	 */
	vrele(fs->lfs_ivnode);
	if (error = vflush(mp, fs->lfs_ivnode, flags))
		return (error);
	/* Mark the fs clean and checkpoint it before tearing down. */
	fs->lfs_clean = 1;
	if (error = VFS_SYNC(mp, 1, p->p_ucred, p))
		return (error);
	if (fs->lfs_ivnode->v_dirtyblkhd)
		panic("lfs_unmount: still dirty blocks on ifile vnode\n");
	vgone(fs->lfs_ivnode);

	/*
	 * NOTE(review): `ronly' is the *negation* of lfs_ronly here, so
	 * a read-only mount is closed with FREAD|FWRITE and vice versa
	 * -- this looks inverted relative to the VOP_OPEN in
	 * lfs_mountfs(); verify against the device close semantics.
	 */
	ronly = !fs->lfs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp,
	    ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	vrele(ump->um_devvp);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Return root of a filesystem
 *
 * Simply retrieves (and returns locked/referenced) the vnode for
 * inode ROOTINO via VFS_VGET.
 */
int
lfs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct vnode *nvp;
	int error;

#ifdef VERBOSE
	printf("lfs_root\n");
#endif
	if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp))
		return (error);
	*vpp = nvp;
	return (0);
}

/*
 * Get file system statistics.
 *
 * Fills *sbp from the in-core superblock; when sbp is not the mount's
 * own statistics buffer, the cached mount names are copied in as well.
 */
lfs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct lfs *fs;
	register struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	fs = ump->um_lfs;
	if (fs->lfs_magic != LFS_MAGIC)
		panic("lfs_statfs: magic");
	sbp->f_type = MOUNT_LFS;
	sbp->f_bsize = fs->lfs_bsize;
	sbp->f_iosize = fs->lfs_bsize;
	sbp->f_blocks = fs->lfs_dsize;
	sbp->f_bfree = dbtofsb(fs, fs->lfs_bfree);
	/*
	 * Blocks available to non-superuser: total minus the minfree
	 * reserve, less the blocks already in use.
	 */
	sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
	    (fs->lfs_dsize - sbp->f_bfree);
	sbp->f_files = fs->lfs_nfiles;
	/* LFS creates inodes on demand, so free inodes track free blocks. */
	sbp->f_ffree = sbp->f_bfree * INOPB(fs);
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
		    (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
		    (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
lfs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	extern int crashandburn, syncprt;
	int error;

#ifdef VERBOSE
	printf("lfs_sync\n");
#endif

#ifdef DIAGNOSTIC
	/* Debugging aid: pretend syncs succeed without writing. */
	if (crashandburn)
		return (0);
#endif

	/* All syncs must be checkpoints until roll-forward is implemented. */
	error = lfs_segwrite(mp, 1);
#ifdef QUOTA
	qsync(mp);
#endif
	return (error);
}

/*
 * Look up an LFS dinode number to find its incore vnode.  If not already
 * in core, read it in from the specified device.  Return the inode locked.
 * Detection and handling of mount points must be done by the calling routine.
 */
int
lfs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct lfs *fs;
	register struct inode *ip;
	struct buf *bp;
	struct ifile *ifp;
	struct vnode *vp;
	struct ufsmount *ump;
	daddr_t daddr;
	dev_t dev;
	int error;

#ifdef VERBOSE
	printf("lfs_vget\n");
#endif
	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/* Fast path: the inode may already be in the hash table. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/*
	 * Translate the inode number to a disk address.  The ifile
	 * itself is located through the superblock; everything else
	 * through its ifile entry.
	 */
	fs = ump->um_lfs;
	if (ino == LFS_IFILE_INUM)
		daddr = fs->lfs_idaddr;
	else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		brelse(bp);
		if (daddr == LFS_UNUSED_DADDR)
			return (ENOENT);
	}

	/* Allocate new vnode/inode. */
	if (error = lfs_vcreate(mp, ino, &vp)) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error =
	    bread(ump->um_devvp, daddr, (int)fs->lfs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it
		 * would be misleading to leave it on its hash chain.
		 * Iput() will return it to the free list.
		 */
		ufs_ihashrem(ip);

		/* Unlock and discard unneeded inode. */
		ufs_iput(ip);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	/* The block may hold several dinodes; find ours within it. */
	ip->i_din = *lfs_ifind(fs, ino, bp->b_un.b_dino);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	if (error = ufs_vinit(mp, lfs_specop_p, LFS_FIFOOPS, &vp)) {
		ufs_iput(ip);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call lfs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 *
 * XXX
 * use ifile to see if inode is allocated instead of reading off disk
 * what is the relationship between my generational number and the NFS
 * generational number.
 */
int
lfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct inode *ip;
	register struct ufid *ufhp;
	register struct netaddrhash *np;
	register struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *nvp;
	struct sockaddr *saddr;
	int error;

	ufhp = (struct ufid *)fhp;
	if (ufhp->ufid_ino < ROOTINO)
		return (ESTALE);
	/*
	 * Get the export permission structure for this <mp, client> tuple.
	 */
	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
		return (EACCES);
	if (nam == NULL) {
		np = (struct netaddrhash *)0;
	} else {
		/*
		 * First search for a network match.
		 */
		np = ump->um_netaddr[NETMASK_HASH];
		while (np) {
			if (netaddr_match(np->neth_family, &np->neth_haddr,
			    &np->neth_hmask, nam))
				break;
			np = np->neth_next;
		}

		/*
		 * If not found, try for an address match.
		 */
		if (np == (struct netaddrhash *)0) {
			saddr = mtod(nam, struct sockaddr *);
			np = ump->um_netaddr[NETADDRHASH(saddr)];
			while (np) {
				if (netaddr_match(np->neth_family,
				    &np->neth_haddr, (struct netaddrhash *)0,
				    nam))
					break;
				np = np->neth_next;
			}
		}
	}
	if (np == (struct netaddrhash *)0) {
		/*
		 * If no address match, use the default if it exists.
		 */
		if ((mp->mnt_flag & MNT_DEFEXPORTED) == 0)
			return (EACCES);
		np = &ump->um_defexported;
	}
	if (error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) {
		*vpp = NULLVP;
		return (error);
	}
	ip = VTOI(nvp);
	/* Stale handle: inode unallocated or generation mismatch. */
	if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen) {
		ufs_iput(ip);
		*vpp = NULLVP;
		return (ESTALE);
	}
	*vpp = nvp;
	*exflagsp = np->neth_exflags;
	*credanonp = &np->neth_anon;
	return (0);
}

/*
 * Vnode pointer to File handle
 *
 * Packs the inode number and generation into the caller-supplied fid.
 */
/* ARGSUSED */
lfs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}