1 /* 2 * Copyright (c) 1989, 1991 The Regents of the University of California. 3 * All rights reserved. 4 * 5 * %sccs.include.redist.c% 6 * 7 * @(#)ffs_vfsops.c 7.75 (Berkeley) 07/20/92 8 */ 9 10 #include <sys/param.h> 11 #include <sys/systm.h> 12 #include <sys/namei.h> 13 #include <sys/proc.h> 14 #include <sys/kernel.h> 15 #include <sys/vnode.h> 16 #include <sys/mount.h> 17 #include <sys/buf.h> 18 #include <sys/mbuf.h> 19 #include <sys/file.h> 20 #include <sys/disklabel.h> 21 #include <sys/ioctl.h> 22 #include <sys/errno.h> 23 #include <sys/malloc.h> 24 #include <sys/socket.h> 25 26 #include <miscfs/specfs/specdev.h> 27 28 #include <ufs/ufs/quota.h> 29 #include <ufs/ufs/ufsmount.h> 30 #include <ufs/ufs/inode.h> 31 #include <ufs/ufs/ufs_extern.h> 32 33 #include <ufs/ffs/fs.h> 34 #include <ufs/ffs/ffs_extern.h> 35 36 int ffs_sbupdate __P((struct ufsmount *, int)); 37 38 struct vfsops ufs_vfsops = { 39 ffs_mount, 40 ufs_start, 41 ffs_unmount, 42 ffs_root, 43 ufs_quotactl, 44 ffs_statfs, 45 ffs_sync, 46 ffs_vget, 47 ffs_fhtovp, 48 ffs_vptofh, 49 ffs_init, 50 }; 51 52 extern u_long nextgennumber; 53 54 /* 55 * Called by main() when ufs is going to be mounted as root. 56 * 57 * Name is updated by mount(8) after booting. 
58 */ 59 #define ROOTNAME "root_device" 60 61 ffs_mountroot() 62 { 63 extern struct vnode *rootvp; 64 register struct fs *fs; 65 register struct mount *mp; 66 struct proc *p = curproc; /* XXX */ 67 struct ufsmount *ump; 68 u_int size; 69 int error; 70 71 mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); 72 bzero((char *)mp, (u_long)sizeof(struct mount)); 73 mp->mnt_op = &ufs_vfsops; 74 mp->mnt_flag = MNT_RDONLY; 75 if (error = ffs_mountfs(rootvp, mp, p)) { 76 free(mp, M_MOUNT); 77 return (error); 78 } 79 if (error = vfs_lock(mp)) { 80 (void)ffs_unmount(mp, 0, p); 81 free(mp, M_MOUNT); 82 return (error); 83 } 84 rootfs = mp; 85 mp->mnt_next = mp; 86 mp->mnt_prev = mp; 87 mp->mnt_vnodecovered = NULLVP; 88 ump = VFSTOUFS(mp); 89 fs = ump->um_fs; 90 bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); 91 fs->fs_fsmnt[0] = '/'; 92 bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, 93 MNAMELEN); 94 (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 95 &size); 96 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 97 (void)ffs_statfs(mp, &mp->mnt_stat, p); 98 vfs_unlock(mp); 99 inittodr(fs->fs_time); 100 return (0); 101 } 102 103 /* 104 * VFS Operations. 105 * 106 * mount system call 107 */ 108 int 109 ffs_mount(mp, path, data, ndp, p) 110 register struct mount *mp; 111 char *path; 112 caddr_t data; 113 struct nameidata *ndp; 114 struct proc *p; 115 { 116 struct vnode *devvp; 117 struct ufs_args args; 118 struct ufsmount *ump; 119 register struct fs *fs; 120 u_int size; 121 int error; 122 123 if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args))) 124 return (error); 125 /* 126 * If updating, check whether changing from read-only to 127 * read/write; if there is no device name, that's all we do. 
128 */ 129 if (mp->mnt_flag & MNT_UPDATE) { 130 ump = VFSTOUFS(mp); 131 fs = ump->um_fs; 132 if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0) 133 fs->fs_ronly = 0; 134 if (args.fspec == 0) { 135 /* 136 * Process export requests. 137 */ 138 if (args.exflags & MNT_EXPORTED) { 139 if (error = ufs_hang_addrlist(mp, &args)) 140 return (error); 141 mp->mnt_flag |= MNT_EXPORTED; 142 } 143 if (args.exflags & MNT_DELEXPORT) { 144 ufs_free_addrlist(ump); 145 mp->mnt_flag &= 146 ~(MNT_EXPORTED | MNT_DEFEXPORTED); 147 } 148 return (0); 149 } 150 } 151 /* 152 * Not an update, or updating the name: look up the name 153 * and verify that it refers to a sensible block device. 154 */ 155 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); 156 if (error = namei(ndp)) 157 return (error); 158 devvp = ndp->ni_vp; 159 160 if (devvp->v_type != VBLK) { 161 vrele(devvp); 162 return (ENOTBLK); 163 } 164 if (major(devvp->v_rdev) >= nblkdev) { 165 vrele(devvp); 166 return (ENXIO); 167 } 168 if ((mp->mnt_flag & MNT_UPDATE) == 0) 169 error = ffs_mountfs(devvp, mp, p); 170 else { 171 if (devvp != ump->um_devvp) 172 error = EINVAL; /* needs translation */ 173 else 174 vrele(devvp); 175 } 176 if (error) { 177 vrele(devvp); 178 return (error); 179 } 180 ump = VFSTOUFS(mp); 181 fs = ump->um_fs; 182 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size); 183 bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size); 184 bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, 185 MNAMELEN); 186 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 187 &size); 188 bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 189 (void)ffs_statfs(mp, &mp->mnt_stat, p); 190 return (0); 191 } 192 193 /* 194 * Common code for mount and mountroot 195 */ 196 int 197 ffs_mountfs(devvp, mp, p) 198 register struct vnode *devvp; 199 struct mount *mp; 200 struct proc *p; 201 { 202 register struct ufsmount *ump; 203 struct buf *bp; 204 register struct fs *fs; 205 
dev_t dev = devvp->v_rdev; 206 struct partinfo dpart; 207 caddr_t base, space; 208 int havepart = 0, blks; 209 int error, i, size; 210 int ronly; 211 extern struct vnode *rootvp; 212 213 /* 214 * Disallow multiple mounts of the same device. 215 * Disallow mounting of a device that is currently in use 216 * (except for root, which might share swap device for miniroot). 217 * Flush out any old buffers remaining from a previous use. 218 */ 219 if (error = ufs_mountedon(devvp)) 220 return (error); 221 if (vcount(devvp) > 1 && devvp != rootvp) 222 return (EBUSY); 223 if (error = vinvalbuf(devvp, 1, p->p_ucred, p)) 224 return (error); 225 226 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 227 if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p)) 228 return (error); 229 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0) 230 size = DEV_BSIZE; 231 else { 232 havepart = 1; 233 size = dpart.disklab->d_secsize; 234 } 235 236 bp = NULL; 237 ump = NULL; 238 if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) 239 goto out; 240 fs = bp->b_un.b_fs; 241 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || 242 fs->fs_bsize < sizeof(struct fs)) { 243 error = EINVAL; /* XXX needs translation */ 244 goto out; 245 } 246 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK); 247 bzero((caddr_t)ump, sizeof *ump); 248 ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, 249 M_WAITOK); 250 bcopy((caddr_t)bp->b_un.b_addr, (caddr_t)ump->um_fs, 251 (u_int)fs->fs_sbsize); 252 if (fs->fs_sbsize < SBSIZE) 253 bp->b_flags |= B_INVAL; 254 brelse(bp); 255 bp = NULL; 256 fs = ump->um_fs; 257 fs->fs_ronly = ronly; 258 if (ronly == 0) 259 fs->fs_fmod = 1; 260 if (havepart) { 261 dpart.part->p_fstype = FS_BSDFFS; 262 dpart.part->p_fsize = fs->fs_fsize; 263 dpart.part->p_frag = fs->fs_frag; 264 dpart.part->p_cpg = fs->fs_cpg; 265 } 266 blks = howmany(fs->fs_cssize, fs->fs_fsize); 267 base = space = malloc((u_long)fs->fs_cssize, M_UFSMNT, 268 M_WAITOK); 269 for (i = 0; i < 
blks; i += fs->fs_frag) { 270 size = fs->fs_bsize; 271 if (i + fs->fs_frag > blks) 272 size = (blks - i) * fs->fs_fsize; 273 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 274 NOCRED, &bp); 275 if (error) { 276 free(base, M_UFSMNT); 277 goto out; 278 } 279 bcopy((caddr_t)bp->b_un.b_addr, space, (u_int)size); 280 fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space; 281 space += size; 282 brelse(bp); 283 bp = NULL; 284 } 285 mp->mnt_data = (qaddr_t)ump; 286 mp->mnt_stat.f_fsid.val[0] = (long)dev; 287 mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS; 288 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 289 mp->mnt_flag |= MNT_LOCAL; 290 ump->um_mountp = mp; 291 ump->um_dev = dev; 292 ump->um_devvp = devvp; 293 for (i = 0; i < MAXQUOTAS; i++) 294 ump->um_quotas[i] = NULLVP; 295 devvp->v_specflags |= SI_MOUNTEDON; 296 297 /* Sanity checks for old file systems. XXX */ 298 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */ 299 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */ 300 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */ 301 fs->fs_nrpos = 8; /* XXX */ 302 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ 303 quad_t sizepb = fs->fs_bsize; /* XXX */ 304 /* XXX */ 305 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */ 306 for (i = 0; i < NIADDR; i++) { /* XXX */ 307 sizepb *= NINDIR(fs); /* XXX */ 308 fs->fs_maxfilesize += sizepb; /* XXX */ 309 } /* XXX */ 310 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */ 311 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */ 312 } /* XXX */ 313 return (0); 314 out: 315 if (bp) 316 brelse(bp); 317 (void)VOP_CLOSE(devvp, ronly ? 
FREAD : FREAD|FWRITE, NOCRED, p); 318 if (ump) { 319 free(ump->um_fs, M_UFSMNT); 320 free(ump, M_UFSMNT); 321 mp->mnt_data = (qaddr_t)0; 322 } 323 return (error); 324 } 325 326 /* 327 * unmount system call 328 */ 329 int 330 ffs_unmount(mp, mntflags, p) 331 struct mount *mp; 332 int mntflags; 333 struct proc *p; 334 { 335 extern int doforce; 336 register struct ufsmount *ump; 337 register struct fs *fs; 338 int i, error, flags, ronly; 339 340 flags = 0; 341 if (mntflags & MNT_FORCE) { 342 if (!doforce || mp == rootfs) 343 return (EINVAL); 344 flags |= FORCECLOSE; 345 } 346 ump = VFSTOUFS(mp); 347 #ifdef QUOTA 348 if (mp->mnt_flag & MNT_QUOTA) { 349 if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) 350 return (error); 351 for (i = 0; i < MAXQUOTAS; i++) { 352 if (ump->um_quotas[i] == NULLVP) 353 continue; 354 quotaoff(p, mp, i); 355 } 356 /* 357 * Here we fall through to vflush again to ensure 358 * that we have gotten rid of all the system vnodes. 359 */ 360 } 361 #endif 362 if (error = vflush(mp, NULLVP, flags)) 363 return (error); 364 fs = ump->um_fs; 365 ronly = !fs->fs_ronly; 366 ump->um_devvp->v_specflags &= ~SI_MOUNTEDON; 367 error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE, 368 NOCRED, p); 369 vrele(ump->um_devvp); 370 free(fs->fs_csp[0], M_UFSMNT); 371 free(fs, M_UFSMNT); 372 free(ump, M_UFSMNT); 373 mp->mnt_data = (qaddr_t)0; 374 mp->mnt_flag &= ~MNT_LOCAL; 375 return (error); 376 } 377 378 /* 379 * Return root of a filesystem 380 */ 381 int 382 ffs_root(mp, vpp) 383 struct mount *mp; 384 struct vnode **vpp; 385 { 386 struct vnode *nvp; 387 int error; 388 389 if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp)) 390 return (error); 391 *vpp = nvp; 392 return (0); 393 } 394 395 /* 396 * Get file system statistics. 
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	/*
	 * Blocks available to non-superuser: free space minus the
	 * minfree reserve.  Can go negative once the reserve has
	 * been dipped into.
	 */
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
		(fs->fs_dsize - sbp->f_bfree);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	/* Copy the cached names only when filling a caller's buffer. */
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	extern int syncprt;
	register struct vnode *vp;
	register struct inode *ip;
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, allerror = 0;

	if (syncprt)
		ufs_bufstats();
	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 */
	if (fs->fs_fmod != 0) {
		/* A modified superblock on a read-only mount is fatal. */
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_sbupdate(ump, waitfor);
	}
	/*
	 * Write back each (modified) inode.
	 */
loop:
	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp))
			continue;
		ip = VTOI(vp);
		/* Skip vnodes with no inode changes and no dirty buffers. */
		if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) == 0 &&
		    vp->v_dirtyblkhd == NULL)
			continue;
		/* vget may sleep; if it fails the list may have changed. */
		if (vget(vp))
			goto loop;
		if (error = VOP_FSYNC(vp, cred, waitfor, p))
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode.
 * If it is not in core, read it in from the specified device.
 * If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount
 * points must be done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct fs *fs;
	register struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct dinode *dp;
	struct vnode *vp;
	union ihead *ih;
	dev_t dev;
	int i, type, error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/* Fast path: the inode is already in the hash. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
		*vpp = NULL;
		return (error);
	}
	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
	MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_flag = 0;
	ip->i_devvp = 0;
	ip->i_mode = 0;
	ip->i_diroff = 0;
	ip->i_lockf = 0;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error = bread(ump->um_devvp, fsbtodb(fs, itod(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. It will be
		 * returned to the free list by ufs_iput().
		 */
		ufs_ihashrem(ip);

		/* Unlock and discard unneeded inode. */
		ufs_iput(ip);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	dp = bp->b_un.b_dino;
	dp += itoo(fs, ino);
	ip->i_din = *dp;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
		ufs_iput(ip);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IMOD;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct inode *ip;
	register struct ufid *ufhp;
	register struct netaddrhash *np;
	register struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	struct vnode *nvp;
	struct sockaddr *saddr;
	int error;

	ufhp = (struct ufid *)fhp;
	fs = ump->um_fs;
	/* Reject inode numbers outside the filesystem's valid range. */
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	/*
	 * Get the export permission structure for this <mp, client> tuple.
	 */
	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
		return (EACCES);
	if (nam == NULL) {
		np = (struct netaddrhash *)0;
	} else {
		/*
		 * First search for a network match.
		 */
		np = ump->um_netaddr[NETMASK_HASH];
		while (np) {
			if (netaddr_match(np->neth_family, &np->neth_haddr,
			    &np->neth_hmask, nam))
				break;
			np = np->neth_next;
		}

		/*
		 * If not found, try for an address match.
		 */
		if (np == (struct netaddrhash *)0) {
			saddr = mtod(nam, struct sockaddr *);
			np = ump->um_netaddr[NETADDRHASH(saddr)];
			while (np) {
				if (netaddr_match(np->neth_family,
				    &np->neth_haddr, (struct netaddrhash *)0,
				    nam))
					break;
				np = np->neth_next;
			}
		}
	}
	if (np == (struct netaddrhash *)0) {
		/*
		 * If no address match, use the default if it exists.
		 */
		if ((mp->mnt_flag & MNT_DEFEXPORTED) == 0)
			return (EACCES);
		np = &ump->um_defexported;
	}
	if (error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) {
		*vpp = NULLVP;
		return (error);
	}
	ip = VTOI(nvp);
	/* Unallocated inode or generation mismatch means a stale handle. */
	if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen) {
		ufs_iput(ip);
		*vpp = NULLVP;
		return (ESTALE);
	}
	*vpp = nvp;
	*exflagsp = np->neth_exflags;
	*credanonp = &np->neth_anon;
	return (0);
}

/*
 * Vnode pointer to File handle
 *
 * Fills in the inode number and generation so ffs_fhtovp() can later
 * detect reuse of the inode.
 */
/* ARGSUSED */
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Write a superblock and associated information back to disk.
 *
 * mp      - the mount whose superblock and summary info are flushed
 * waitfor - MNT_WAIT for synchronous writes, otherwise async
 * Returns the last synchronous write error, or 0.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0;

	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize);
	bcopy((caddr_t)fs, bp->b_un.b_addr, (u_int)fs->fs_sbsize);
	/*
	 * Restore compatibility to old file systems.  Only the on-disk
	 * copy in the buffer is patched; the in-core fs keeps nrpos = 8.
	 * XXX
	 */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		bp->b_un.b_fs->fs_nrpos = -1;			/* XXX */
	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	/* Write the cylinder-group summary area, one fs block at a time. */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), size);
		bcopy(space, bp->b_un.b_addr, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	return (error);
}