/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_vfsops.c	8.13 (Berkeley) 10/27/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

int ffs_sbupdate __P((struct ufsmount *, int));

/*
 * Fast filesystem (FFS) vfs operations vector.  Operations that are
 * identical for all UFS variants are taken from the generic ufs layer
 * (ufs_start, ufs_root, ufs_quotactl); the rest are FFS-specific.
 */
struct vfsops ufs_vfsops = {
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
};

/* Next inode generation number to hand out; see ffs_vget(). */
extern u_long nextgennumber;

/*
 * Called by main() when ufs is going to be mounted as root.
 *
 * Name is updated by mount(8) after booting.
 */
#define ROOTNAME	"root_device"

/*
 * Mount the root filesystem read-only from rootdev.
 *
 * Builds the struct mount by hand (no syscall path), mounts via
 * ffs_mountfs(), links the mount onto the global mountlist, fills in
 * the mnt_stat names ("/" and ROOTNAME), and seeds the time-of-day
 * clock from the superblock's fs_time.  Returns 0 on success or an
 * errno from ffs_mountfs()/vfs_lock(); panics if the block-device
 * vnodes for rootdev/swapdev cannot be created.
 */
ffs_mountroot()
{
	extern struct vnode *rootvp;
	register struct fs *fs;
	register struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	u_int size;
	int error;

	/*
	 * Get vnodes for swapdev and rootdev.
	 */
	if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	mp->mnt_op = &ufs_vfsops;
	mp->mnt_flag = MNT_RDONLY;	/* root always mounts read-only first */
	if (error = ffs_mountfs(rootvp, mp, p)) {
		free(mp, M_MOUNT);
		return (error);
	}
	if (error = vfs_lock(mp)) {
		(void)ffs_unmount(mp, 0, p);
		free(mp, M_MOUNT);
		return (error);
	}
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mp->mnt_flag |= MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;	/* root covers no vnode */
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record the mount point name ("/") both in-core and in the fs. */
	bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[0] = '/';
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unlock(mp);
	inittodr(fs->fs_time);	/* set clock from superblock timestamp */
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
/*
 * Mount (or update) an FFS filesystem.
 *
 * mp   - mount point, possibly already mounted when MNT_UPDATE is set
 * path - user-space path of the mount point (copied into fs_fsmnt)
 * data - user-space struct ufs_args (device name, export info)
 * ndp  - caller-supplied nameidata used to look up args.fspec
 * p    - calling process, used for credential checks
 *
 * Handles three update cases before a fresh mount: downgrade to
 * read-only (flush files), MNT_RELOAD (re-read on-disk data), and
 * upgrade to read-write (device permission check for non-root).
 * A null args.fspec on update means this is purely an export-list
 * change.  Returns 0 or an errno.
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error, flags;
	mode_t accessmode;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		error = 0;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Downgrading r/w -> r/o: write back and close files. */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp))
				return (EBUSY);
			error = ffs_flushfiles(mp, flags, p);
			vfs_unbusy(mp);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (error)
			return (error);
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				devvp = ump->um_devvp;
				VOP_LOCK(devvp);
				if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p)) {
					VOP_UNLOCK(devvp);
					return (error);
				}
				VOP_UNLOCK(devvp);
			}
			fs->fs_ronly = 0;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		VOP_LOCK(devvp);
		if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
			vput(devvp);
			return (error);
		}
		VOP_UNLOCK(devvp);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		/*
		 * Updating with a device name: it must refer to the same
		 * device already mounted here.  (ump was set in the
		 * MNT_UPDATE branch above, so it is valid here.)
		 */
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record the (possibly new) mount point and device names. */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct csum *space;
	struct buf *bp;
	struct fs *fs;
	int i, blks, size, error;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		return (error);
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mountp)->um_fs;
	/*
	 * Copy the in-core csp (cylinder summary) pointers over the
	 * freshly read superblock image first, so they survive the
	 * wholesale bcopy of the on-disk superblock below -- fs_csp
	 * holds kernel addresses, not on-disk data.
	 */
	bcopy(&fs->fs_csp[0], &((struct fs *)bp->b_data)->fs_csp[0],
	    sizeof(fs->fs_csp));
	bcopy(bp->b_data, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;	/* partial block: don't cache it */
	brelse(bp);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp))
			return (error);
		bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
		brelse(bp);
	}
loop:
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/* Fetch the next pointer now; vgone() may free vp. */
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * Step 5: invalidate all cached file data.
		 */
		if (vget(vp, 1))
			goto loop;	/* list changed while sleeping */
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		if (error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp)) {
			vput(vp);
			return (error);
		}
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		brelse(bp);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;	/* vnode moved; restart scan */
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
/*
 * Mount the filesystem on devvp into mp.
 *
 * Reads and validates the superblock, allocates and initializes the
 * ufsmount structure, reads the cylinder-group summary area into one
 * contiguous kernel allocation (fs_csp[] points into it), and marks
 * the device vnode SI_MOUNTEDON.  On failure all allocations are
 * released and the device is closed again; the caller retains its
 * reference on devvp.  Returns 0 or an errno.
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	register struct ufsmount *ump;
	struct buf *bp;
	register struct fs *fs;
	dev_t dev = devvp->v_rdev;
	struct partinfo dpart;
	caddr_t base, space;
	int havepart = 0, blks;
	int error, i, size, ronly;
	int32_t *lp;
	extern struct vnode *rootvp;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = vfs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
		return (error);
	/* Query the disklabel for the sector size; fall back to DEV_BSIZE. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		havepart = 1;
		size = dpart.disklab->d_secsize;
	}

	bp = NULL;
	ump = NULL;
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		goto out;
	fs = (struct fs *)bp->b_data;
	/* Basic sanity checks before trusting any superblock field. */
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero((caddr_t)ump, sizeof *ump);
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;	/* partial block: don't cache it */
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0)
		fs->fs_fmod = 1;	/* superblock will need writing back */
	/*
	 * Read the cylinder-group summary area into a single allocation;
	 * if cluster summaries are in use, reserve per-cg space for the
	 * in-core fs_maxcluster table at the end of the same allocation.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	size = fs->fs_cssize;
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ffs_oldfscompat(fs);
	/*
	 * Clamp fs_maxfilesize to 2^39 in core but remember the on-disk
	 * value so ffs_sbupdate() can restore it when writing back.
	 */
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	if (fs->fs_maxfilesize > (quad_t)1 << 39)		/* XXX */
		fs->fs_maxfilesize = (quad_t)1 << 39;		/* XXX */
	return (0);
out:
	/* Undo everything: release buffer, close device, free structures. */
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		quad_t sizepb = fs->fs_bsize;			/* XXX */
								/* XXX */
		/* Compute maxfilesize from direct + indirect coverage. */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
/*
 * Flush all files (forcibly if MNT_FORCE), close the underlying
 * device, and free the superblock, summary area, and ufsmount.
 * A forced unmount of the root filesystem is refused (EINVAL).
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (mp->mnt_flag & MNT_ROOTFS)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	if (error = ffs_flushfiles(mp, flags, p))
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vrele(ump->um_devvp);
	/* fs_csp[0] is the base of the single summary-area allocation. */
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;	/* global switch disabling forced unmounts */
	register struct ufsmount *ump;
	int i, error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		/* First pass skips the quota (system) vnodes themselves. */
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Get file system statistics.
 */
/*
 * Fill in sbp from the in-core superblock.  f_bavail applies the
 * minfree reservation; f_files/f_ffree come from the inode counts.
 * When sbp is not the mount's own mnt_stat, the cached mount-point
 * and device names are copied in as well.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_bsize = fs->fs_fsize;	/* fundamental block = fragment */
	sbp->f_iosize = fs->fs_bsize;	/* optimal I/O = full block */
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	/* Space available to non-root: total minus the minfree reserve. */
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 */
	if (fs->fs_fmod != 0) {
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_sbupdate(ump, waitfor);
	}
	/*
	 * Write back each (modified) inode.
	 */
loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp))
			continue;	/* someone else is working on it */
		ip = VTOI(vp);
		/* Skip vnodes with no pending inode or data changes. */
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL)
			continue;
		if (vget(vp, 1))
			goto loop;	/* list changed while sleeping */
		if (error = VOP_FSYNC(vp, cred, waitfor, p))
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct fs *fs;
	register struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int i, type, error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/* Fast path: already in core via the inode hash. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
		*vpp = NULL;
		return (error);
	}
	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
	MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
	bzero((caddr_t)ip, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	/* Reject handles whose inode number is outside the valid range. */
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;	/* generation guards against reuse */
	return (0);
}

/*
 * Write a superblock and associated information back to disk.
 */
/*
 * Write the in-core superblock and the cylinder-group summary area
 * back to disk.  With MNT_WAIT the writes are synchronous and the
 * last error (if any) is returned; otherwise they are asynchronous
 * and 0 is returned.  The copy written to disk is patched for
 * backward compatibility with pre-4.4 superblock layouts before
 * being issued (the in-core superblock is left untouched).
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *dfs, *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0;

	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	dfs = (struct fs *)bp->b_data;				/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		dfs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		long *lp, tmp;					/* XXX */
								/* XXX */
		/*
		 * Rotate the five longs starting at fs_qbmask back
		 * into the field order the old on-disk format used.
		 */
		lp = (long *)&dfs->fs_qbmask;			/* XXX */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	/* Write the unclamped value saved by ffs_mountfs().	   XXX */
	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	/* Write out the cylinder-group summary area, block by block. */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
		bcopy(space, bp->b_data, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	return (error);
}