/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_vfsops.c	7.54 (Berkeley) 04/19/91
 */

#include "param.h"
#include "systm.h"
#include "namei.h"
#include "proc.h"
#include "kernel.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "buf.h"
#include "file.h"
#include "disklabel.h"
#include "ioctl.h"
#include "errno.h"
#include "malloc.h"

#include "quota.h"
#include "fs.h"
#include "ufsmount.h"
#include "inode.h"

struct vfsops ufs_vfsops = {
	ufs_mount,
	ufs_start,
	ufs_unmount,
	ufs_root,
	ufs_quotactl,
	ufs_statfs,
	ufs_sync,
	ufs_fhtovp,
	ufs_vptofh,
	ufs_init
};

/*
 * Flag to allow forcible unmounting.
 */
int doforce = 1;

/*
 * Called by vfs_mountroot when ufs is going to be mounted as root.
 *
 * Name is updated by mount(8) after booting.
 */
#define ROOTNAME	"root_device"

ufs_mountroot()
{
	register struct mount *mp;
	extern struct vnode *rootvp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error;

	mp = (struct mount *)malloc((u_long)sizeof(struct mount),
	    M_MOUNT, M_WAITOK);
	mp->mnt_op = &ufs_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_exroot = 0;
	mp->mnt_mounth = NULLVP;
	error = mountfs(rootvp, mp, p);
	if (error) {
		free((caddr_t)mp, M_MOUNT);
		return (error);
	}
	if (error = vfs_lock(mp)) {
		(void)ufs_unmount(mp, 0, p);
		free((caddr_t)mp, M_MOUNT);
		return (error);
	}
	rootfs = mp;
	mp->mnt_next = mp;
	mp->mnt_prev = mp;
	mp->mnt_vnodecovered = NULLVP;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[0] = '/';
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
	vfs_unlock(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
ufs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump;
	register struct fs *fs;
	u_int size;
	int error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
		return (error);
	/*
	 * Process export requests.
	 */
	if ((args.exflags & MNT_EXPORTED) || (mp->mnt_flag & MNT_EXPORTED)) {
		if (args.exflags & MNT_EXPORTED)
			mp->mnt_flag |= MNT_EXPORTED;
		else
			mp->mnt_flag &= ~MNT_EXPORTED;
		if (args.exflags & MNT_EXRDONLY)
			mp->mnt_flag |= MNT_EXRDONLY;
		else
			mp->mnt_flag &= ~MNT_EXRDONLY;
		mp->mnt_exroot = args.exroot;
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = getmdev(&devvp, args.fspec, ndp, p)) != 0)
			return (error);
		error = mountfs(devvp, mp, p);
	} else {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
			fs->fs_ronly = 0;
		/*
		 * Verify that the specified device is the one that
		 * is really being used for the root file system.
		 */
		if (args.fspec == 0)
			return (0);
		if ((error = getmdev(&devvp, args.fspec, ndp, p)) != 0)
			return (error);
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
	    MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	(void) ufs_statfs(mp, &mp->mnt_stat, p);
	return (0);
}
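
#ifdef notdef
/*
 * Illustrative sketch only (not compiled into the kernel): roughly how a
 * user-level mount(2) call would arrive at ufs_mount() above.  The device
 * and directory names are hypothetical examples, and error handling is
 * omitted; only the struct ufs_args fields actually examined above are set.
 */
example_mount()
{
	struct ufs_args args;

	args.fspec = "/dev/xd0a";	/* hypothetical block special file */
	args.exflags = 0;		/* not exported */
	args.exroot = 0;
	return (mount(MOUNT_UFS, "/mnt", MNT_RDONLY, (caddr_t)&args));
}
#endif /* notdef */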

/*
 * Common code for mount and mountroot
 */
mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	register struct ufsmount *ump = (struct ufsmount *)0;
	struct buf *bp = NULL;
	register struct fs *fs;
	dev_t dev = devvp->v_rdev;
	struct partinfo dpart;
	caddr_t base, space;
	int havepart = 0, blks;
	int error, i, size;
	int needclose = 0;
	int ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	extern struct vnode *rootvp;

	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if (error = mountedon(devvp))
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vinvalbuf(devvp, 1);
	if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p))
		return (error);
	needclose = 1;
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else {
		havepart = 1;
		size = dpart.disklab->d_secsize;
	}
	if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
		goto out;
	fs = bp->b_un.b_fs;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	ump = (struct ufsmount *)malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	ump->um_fs = (struct fs *)malloc((u_long)fs->fs_sbsize, M_SUPERBLK,
	    M_WAITOK);
	bcopy((caddr_t)bp->b_un.b_addr, (caddr_t)ump->um_fs,
	    (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0)
		fs->fs_fmod = 1;
	if (havepart) {
		dpart.part->p_fstype = FS_BSDFFS;
		dpart.part->p_fsize = fs->fs_fsize;
		dpart.part->p_frag = fs->fs_frag;
		dpart.part->p_cpg = fs->fs_cpg;
	}
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	base = space = (caddr_t)malloc((u_long)fs->fs_cssize, M_SUPERBLK,
	    M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			free((caddr_t)base, M_SUPERBLK);
			goto out;
		}
		bcopy((caddr_t)bp->b_un.b_addr, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;

	/* Sanity checks for old file systems.			   XXX */
	fs->fs_npsect = MAX(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = MAX(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	return (0);
out:
	if (bp)
		brelse(bp);
	if (needclose)
		(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
	if (ump) {
		free((caddr_t)ump->um_fs, M_SUPERBLK);
		free((caddr_t)ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}
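
/*
 * Worked example (hypothetical numbers) for the summary-area loop in
 * mountfs() above: with fs_bsize = 8192, fs_fsize = 1024, fs_frag = 8 and
 * fs_cssize = 20480, blks = howmany(20480, 1024) = 20 fragments.  The loop
 * then reads at i = 0 and i = 8 with size = fs_bsize = 8192, and at i = 16
 * with size = (20 - 16) * 1024 = 4096, wiring fs_csp[0], fs_csp[1] and
 * fs_csp[2] to successive pieces of the single contiguous buffer obtained
 * from malloc().
 */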

/*
 * Make a filesystem operational.
 * Nothing to do at the moment.
 */
/* ARGSUSED */
ufs_start(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{

	return (0);
}

/*
 * unmount system call
 */
ufs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int i, error, ronly, flags = 0;

	if (mntflags & MNT_FORCE) {
		if (!doforce || mp == rootfs)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	mntflushbuf(mp, 0);
	if (mntinvalbuf(mp))
		return (EBUSY);
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags))
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	if (error = vflush(mp, NULLVP, flags))
		return (error);
	fs = ump->um_fs;
	ronly = !fs->fs_ronly;
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vrele(ump->um_devvp);
	free((caddr_t)fs->fs_csp[0], M_SUPERBLK);
	free((caddr_t)fs, M_SUPERBLK);
	free((caddr_t)ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}
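
/*
 * Note on forced unmounts: a forced unmount request shows up in
 * ufs_unmount() above as MNT_FORCE in mntflags.  It is honored only when
 * the "doforce" flag is set and the target is not the root filesystem, in
 * which case vflush() is called with FORCECLOSE so that vnodes still in
 * use are revoked instead of making the unmount fail with EBUSY.
 */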

/*
 * Check to see if a filesystem is mounted on a block device.
 */
mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Return root of a filesystem
 */
ufs_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	error = iget(ip, (ino_t)ROOTINO, &nip);
	if (error)
		return (error);
	*vpp = ITOV(nip);
	return (0);
}

/*
 * Do operations associated with quotas
 */
ufs_quotactl(mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	struct ufsmount *ump = VFSTOUFS(mp);
	int cmd, type, error;

#ifndef QUOTA
	return (EOPNOTSUPP);
#else
	if (uid == -1)
		uid = p->p_cred->p_ruid;
	cmd = cmds >> SUBCMDSHIFT;

	switch (cmd) {
	case Q_GETQUOTA:
	case Q_SYNC:
		if (uid == p->p_cred->p_ruid)
			break;
		/* fall through */
	default:
		if (error = suser(p->p_ucred, &p->p_acflag))
			return (error);
	}

	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);

	switch (cmd) {

	case Q_QUOTAON:
		return (quotaon(p, mp, type, arg));

	case Q_QUOTAOFF:
		if (vfs_busy(mp))
			return (0);
		error = quotaoff(mp, type);
		vfs_unbusy(mp);
		return (error);

	case Q_SETQUOTA:
		return (setquota(mp, uid, type, arg));

	case Q_SETUSE:
		return (setuse(mp, uid, type, arg));

	case Q_GETQUOTA:
		return (getquota(mp, uid, type, arg));

	case Q_SYNC:
		if (vfs_busy(mp))
			return (0);
		error = qsync(mp);
		vfs_unbusy(mp);
		return (error);

	default:
		return (EINVAL);
	}
	/* NOTREACHED */
#endif
}

/*
 * Get file system statistics.
 */
ufs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ufs_statfs");
	sbp->f_type = MOUNT_UFS;
	sbp->f_fsize = fs->fs_fsize;
	sbp->f_bsize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = (fs->fs_dsize * (100 - fs->fs_minfree) / 100) -
	    (fs->fs_dsize - sbp->f_bfree);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
		    (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
		    (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}
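
/*
 * Worked example (hypothetical numbers) for the f_bavail computation in
 * ufs_statfs() above: with fs_dsize = 100000 fragments, fs_minfree = 10
 * and f_bfree = 30000, the space available to ordinary users is
 * (100000 * 90 / 100) - (100000 - 30000) = 90000 - 70000 = 20000
 * fragments.  The last fs_minfree percent of the data area is reserved
 * for the superuser, so f_bavail goes negative once that reserve is
 * being consumed.
 */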

int	syncprt = 0;

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
ufs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, allerror = 0;

	if (syncprt)
		bufstats();
	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 */
	if (fs->fs_fmod != 0) {
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = sbupdate(ump, waitfor);
	}
	/*
	 * Write back each (modified) inode.
	 */
loop:
	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp))
			continue;
		ip = VTOI(vp);
		if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) == 0 &&
		    vp->v_dirtyblkhd == NULL)
			continue;
		if (vget(vp))
			goto loop;
		if (vp->v_dirtyblkhd)
			vflushbuf(vp, 0);
		if ((ip->i_flag & (IMOD|IACC|IUPD|ICHG)) &&
		    (error = iupdat(ip, &time, &time, 0)))
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale file system control information to be flushed.
	 */
	vflushbuf(ump->um_devvp, waitfor == MNT_WAIT ? B_SYNC : 0);
#ifdef QUOTA
	qsync(mp);
#endif
	return (allerror);
}

/*
 * Write a superblock and associated information back to disk.
 */
sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0;

	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize);
	bcopy((caddr_t)fs, bp->b_un.b_addr, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		bp->b_un.b_fs->fs_nrpos = -1;			/* XXX */
	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), size);
		bcopy(space, bp->b_un.b_addr, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	return (error);
}
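
/*
 * Note on the "waitfor" argument used by ufs_sync() and sbupdate() above:
 * MNT_WAIT makes each superblock and summary write synchronous via
 * bwrite(), so the caller sees the final write error; any other value
 * falls through to bawrite(), which merely starts the writes and lets
 * them complete in the background, so error remains 0 on that path.
 */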

/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * above.
 */
bufstats()
{
	int s, i, j, count;
	register struct buf *bp, *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
			counts[dp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is in range
 * - call iget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the generation number matches
 */
ufs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	register struct fs *fs;
	register struct inode *ip;
	struct inode *nip;
	struct vnode tvp;
	int error;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) {
		*vpp = NULLVP;
		return (EINVAL);
	}
	tvp.v_mount = mp;
	ip = VTOI(&tvp);
	ip->i_vnode = &tvp;
	ip->i_dev = VFSTOUFS(mp)->um_dev;
	if (error = iget(ip, ufhp->ufid_ino, &nip)) {
		*vpp = NULLVP;
		return (error);
	}
	ip = nip;
	if (ip->i_mode == 0) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	if (ip->i_gen != ufhp->ufid_gen) {
		iput(ip);
		*vpp = NULLVP;
		return (EINVAL);
	}
	*vpp = ITOV(ip);
	return (0);
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
ufs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip = VTOI(vp);
	register struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Check that the user's argument is a reasonable
 * thing on which to mount, and return the device number if so.
 */
getmdev(devvpp, fname, ndp, p)
	struct vnode **devvpp;
	caddr_t fname;
	register struct nameidata *ndp;
	struct proc *p;
{
	register struct vnode *vp;
	int error;

	ndp->ni_nameiop = LOOKUP | FOLLOW;
	ndp->ni_segflg = UIO_USERSPACE;
	ndp->ni_dirp = fname;
	if (error = namei(ndp, p))
		return (error);
	vp = ndp->ni_vp;
	if (vp->v_type != VBLK) {
		vrele(vp);
		return (ENOTBLK);
	}
	if (major(vp->v_rdev) >= nblkdev) {
		vrele(vp);
		return (ENXIO);
	}
	*devvpp = vp;
	return (0);
}
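
#ifdef notdef
/*
 * Illustrative sketch only (not compiled into the kernel): the round trip
 * that ufs_vptofh() and ufs_fhtovp() above are meant to support for file
 * handles handed out to remote clients.  The helper name is hypothetical.
 */
example_fh_roundtrip(mp, vp)
	struct mount *mp;
	struct vnode *vp;
{
	struct ufid ufh;
	struct vnode *nvp;
	int error;

	(void) ufs_vptofh(vp, (struct fid *)&ufh);	/* record ino + generation */
	if (error = ufs_fhtovp(mp, (struct fid *)&ufh, &nvp))
		return (error);	/* stale: out of range, unallocated, or regenerated */
	iput(VTOI(nvp));		/* iget() returned the inode locked */
	return (0);
}
#endif /* notdef */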