/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_vfsops.c	8.4 (Berkeley) 12/30/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

int ffs_sbupdate __P((struct ufsmount *, int));

/*
 * Dispatch table of FFS VFS operations; generic UFS handlers are used
 * where FFS needs no specialization (start, quotactl).
 */
struct vfsops ufs_vfsops = {
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ffs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
};

/* Monotone source of inode generation numbers (see ffs_vget). */
extern u_long nextgennumber;

/*
 * Called by main() when ufs is going to be mounted as root.
 *
 * Name is updated by mount(8) after booting.
 */
#define ROOTNAME	"root_device"

/*
 * Mount the root filesystem read-only on rootdev.  Allocates and links
 * the struct mount itself, then delegates the real work to ffs_mountfs().
 * Returns 0 on success or an errno; on failure the mount struct is freed.
 */
ffs_mountroot()
{
	extern struct vnode *rootvp;
	register struct fs *fs;
	register struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	u_int size;
	int error;

	/*
	 * Get vnodes for swapdev and rootdev.
	 */
	if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	mp->mnt_op = &ufs_vfsops;
	mp->mnt_flag = MNT_RDONLY;	/* root always starts read-only */
	if (error = ffs_mountfs(rootvp, mp, p)) {
		free(mp, M_MOUNT);
		return (error);
	}
	if (error = vfs_lock(mp)) {
		(void)ffs_unmount(mp, 0, p);
		free(mp, M_MOUNT);
		return (error);
	}
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mp->mnt_flag |= MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;	/* root covers no vnode */
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record "/" as the mount point, both in-core and in the superblock. */
	bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[0] = '/';
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unlock(mp);
	/* Seed the time-of-day clock from the superblock timestamp. */
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
/*
 * Mount an FFS filesystem, or update an existing mount (MNT_UPDATE):
 * downgrade to read-only, reload after fsck (MNT_RELOAD), process NFS
 * export requests, or upgrade to read/write (MNT_WANTRDWR).  `data'
 * points at a user-space struct ufs_args.  Returns 0 or an errno.
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error, flags;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		error = 0;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Going read/write -> read-only: flush everything. */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp))
				return (EBUSY);
			error = ffs_flushfiles(mp, flags, p);
			vfs_unbusy(mp);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (error)
			return (error);
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR))
			fs->fs_ronly = 0;
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			if (args.exflags & MNT_EXPORTED) {
				if (error = ufs_hang_addrlist(mp, &args))
					return (error);
				mp->mnt_flag |= MNT_EXPORTED;
			}
			if (args.exflags & MNT_DELEXPORT) {
				ufs_free_addrlist(ump);
				mp->mnt_flag &=
				    ~(MNT_EXPORTED | MNT_DEFEXPORTED);
			}
			return (0);
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if (error = namei(ndp))
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		/*
		 * On update, the named device must be the one already
		 * mounted; `ump' was set in the MNT_UPDATE branch above.
		 */
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record mount point and device names in superblock and statfs. */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct csum *space;	/* NOTE(review): assigned below but never read */
	struct buf *bp;
	struct fs *fs;
	int i, blks, size, error;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		return (error);
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mountp)->um_fs;
	/*
	 * Preserve the in-core csum pointer array across the superblock
	 * overwrite: copy the old pointers into the buffer image before
	 * the wholesale bcopy below replaces the in-core superblock.
	 */
	bcopy(&fs->fs_csp[0], &((struct fs *)bp->b_data)->fs_csp[0],
	    sizeof(fs->fs_csp));
	bcopy(bp->b_data, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp))
			return (error);
		bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
		brelse(bp);
	}
loop:
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * Step 5: invalidate all cached file data.
		 */
		if (vget(vp, 1))
			goto loop;	/* vnode changed under us; restart */
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		if (error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp)) {
			vput(vp);
			return (error);
		}
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		brelse(bp);
		vput(vp);
		/* List may have been altered while the vnode was locked. */
		if (vp->v_mount != mountp)
			goto loop;
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
/*
 * Read and validate the superblock of `devvp', allocate the ufsmount
 * and in-core superblock, load the cylinder-group summary area, and
 * wire everything into `mp'.  On any failure the device is closed and
 * all allocations are released before returning the errno.
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	register struct ufsmount *ump;
	struct buf *bp;
	register struct fs *fs;
	dev_t dev = devvp->v_rdev;
	struct partinfo dpart;
	caddr_t base, space;
	int havepart = 0, blks;
	int error, i, size;
	int ronly;
	extern struct vnode *rootvp;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = ufs_mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
		return (error);
	/* Probe the disklabel; fall back to DEV_BSIZE if unavailable. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		havepart = 1;
		size = dpart.disklab->d_secsize;
	}

	bp = NULL;
	ump = NULL;
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		goto out;
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero((caddr_t)ump, sizeof *ump);
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0)
		fs->fs_fmod = 1;	/* mark dirty: sync will rewrite sb */
	if (havepart) {
		/* Refresh the label's idea of this partition's geometry. */
		dpart.part->p_fstype = FS_BSDFFS;
		dpart.part->p_fsize = fs->fs_fsize;
		dpart.part->p_frag = fs->fs_frag;
		dpart.part->p_cpg = fs->fs_cpg;
	}
	/* Read the cylinder-group summary area into one contiguous buffer. */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	base = space = malloc((u_long)fs->fs_cssize, M_UFSMNT,
	    M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ffs_oldfscompat(fs);
	return (0);
out:
	/* Unwind: release buffer, close device, free partial allocations. */
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		quad_t sizepb = fs->fs_bsize;			/* XXX */
								/* XXX */
		/* Derive maxfilesize: direct blocks plus all indirects. */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags, ronly;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (mp->mnt_flag & MNT_ROOTFS)
			return (EINVAL);	/* never force-unmount root */
		flags |= FORCECLOSE;
	}
	if (error = ffs_flushfiles(mp, flags, p))
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/*
	 * NOTE(review): `ronly' is the INVERSE of fs->fs_ronly, so the
	 * VOP_CLOSE below passes FREAD|FWRITE for a read-only mount and
	 * FREAD for a read/write one -- the opposite of the VOP_OPEN in
	 * ffs_mountfs.  Looks inverted; confirm against the device close
	 * semantics before changing.
	 */
	ronly = !fs->fs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vrele(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int i, error;

	if (!doforce)
		flags &= ~FORCECLOSE;	/* forcible unmount globally disabled */
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		/* Flush regular vnodes first, then shut down the quota files. */
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Return root of a filesystem
 */
int
ffs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct vnode *nvp;
	int error;

	if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp))
		return (error);
	*vpp = nvp;
	return (0);
}

/*
 * Get file system statistics.
 */
/*
 * Fill in `sbp' from the in-core superblock.  f_bavail reflects the
 * minfree reservation, so it can go negative when the fs is overfull.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	/* Free space in fragments: whole free blocks plus loose frags. */
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 */
	if (fs->fs_fmod != 0) {
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_sbupdate(ump, waitfor);
	}
	/*
	 * Write back each (modified) inode.
	 */
loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp))
			continue;
		/* Skip vnodes with no timestamp changes and no dirty buffers. */
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL)
			continue;
		if (vget(vp, 1))
			goto loop;	/* vnode reclaimed; restart scan */
		if (error = VOP_FSYNC(vp, cred, waitfor, p))
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct fs *fs;
	register struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	union ihead *ih;
	dev_t dev;
	int i, type, error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/* Fast path: inode already in core via the inode hash. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
		*vpp = NULL;
		return (error);
	}
	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
	MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
	bzero((caddr_t)ip, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	/* Reject inode numbers outside the valid range for this fs. */
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Write a superblock and associated information back to disk.
814 */ 815 int 816 ffs_sbupdate(mp, waitfor) 817 struct ufsmount *mp; 818 int waitfor; 819 { 820 register struct fs *fs = mp->um_fs; 821 register struct buf *bp; 822 int blks; 823 caddr_t space; 824 int i, size, error = 0; 825 826 bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0); 827 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 828 /* Restore compatibility to old file systems. XXX */ 829 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */ 830 ((struct fs *)bp->b_data)->fs_nrpos = -1; /* XXX */ 831 if (waitfor == MNT_WAIT) 832 error = bwrite(bp); 833 else 834 bawrite(bp); 835 blks = howmany(fs->fs_cssize, fs->fs_fsize); 836 space = (caddr_t)fs->fs_csp[0]; 837 for (i = 0; i < blks; i += fs->fs_frag) { 838 size = fs->fs_bsize; 839 if (i + fs->fs_frag > blks) 840 size = (blks - i) * fs->fs_fsize; 841 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), 842 size, 0, 0); 843 bcopy(space, bp->b_data, (u_int)size); 844 space += size; 845 if (waitfor == MNT_WAIT) 846 error = bwrite(bp); 847 else 848 bawrite(bp); 849 } 850 return (error); 851 } 852