/*
 * Copyright (c) 1994 The Regents of the University of California.
 * Copyright (c) 1994 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vfsops.c	8.12 (Berkeley) 07/28/94
 */

/*
 * Union Layer
 *
 * VFS-level operations for the union filesystem: a union mount stacks
 * an "upper" directory over (or under) the directory covered by the
 * mount point, presenting a merged view of the two.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <miscfs/union/union.h>

/*
 * Mount union filesystem.
 *
 * mp	- mount point being set up; mp->mnt_vnodecovered is the lower layer
 * path	- user-space string naming the mount point (for f_mntonname)
 * data	- user-space pointer to a struct union_args (target dir + flags)
 * ndp	- nameidata used to look up the upper-layer target directory
 * p	- calling process (credentials, cmask)
 *
 * Returns 0 on success or an errno value.  On failure every reference
 * acquired here is released at the "bad" label.  On success the held
 * references to the target vnodes persist until union_unmount.
 */
int
union_mount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	int error = 0;
	struct union_args args;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	struct ucred *cred = 0;
	/*
	 * NOTE(review): scred and va are never referenced below, and cred
	 * is only tested in the "bad" cleanup path (always 0 here) --
	 * presumably leftovers from an earlier revision.
	 */
	struct ucred *scred;
	struct vattr va;
	char *cp;
	int len;
	u_int size;

#ifdef UNION_DIAGNOSTIC
	printf("union_mount(mp = %x)\n", mp);
#endif

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Need to provide.
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		error = EOPNOTSUPP;
		goto bad;
	}

	/*
	 * Get argument
	 */
	if (error = copyin(data, (caddr_t)&args, sizeof(struct union_args)))
		goto bad;

	/* The covered vnode is the lower layer; hold it for the mount. */
	lowerrootvp = mp->mnt_vnodecovered;
	VREF(lowerrootvp);

	/*
	 * Find upper node.  WANTPARENT is requested only so the parent
	 * reference can be dropped immediately after the lookup.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
	       UIO_USERSPACE, args.target, p);

	if (error = namei(ndp))
		goto bad;

	upperrootvp = ndp->ni_vp;
	/* Release the parent reference obtained via WANTPARENT. */
	vrele(ndp->ni_dvp);
	ndp->ni_dvp = NULL;

	/* The upper layer must be a directory. */
	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	um = (struct union_mount *) malloc(sizeof(struct union_mount),
				M_UFSMNT, M_WAITOK);	/* XXX */

	/*
	 * Keep a held reference to the target vnodes.
	 * They are vrele'd in union_unmount.
	 *
	 * Depending on the _BELOW flag, the filesystems are
	 * viewed in a different order.  In effect, this is the
	 * same as providing a mount under option to the mount syscall.
	 */

	um->um_op = args.mntflags & UNMNT_OPMASK;
	switch (um->um_op) {
	case UNMNT_ABOVE:
		/* Target sits above the covered directory (normal case). */
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		break;

	case UNMNT_BELOW:
		/* Target sits below: covered directory becomes the top. */
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		break;

	case UNMNT_REPLACE:
		/* Target completely replaces the covered directory;
		 * no lower layer, so drop its reference now. */
		vrele(lowerrootvp);
		lowerrootvp = NULLVP;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		error = VOP_WHITEOUT(um->um_uppervp, (struct componentname *) 0, LOOKUP);
		if (error)
			goto bad;
	}

	/* Remember mounter's credentials and directory-creation mode. */
	um->um_cred = p->p_ucred;
	crhold(um->um_cred);
	um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask;

	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment it has been defined that the filesystem is
	 * only local if it is all local, ie the MNT_LOCAL flag implies
	 * that the entire namespace is local.  If you think the MNT_LOCAL
	 * flag implies that some of the files might be stored locally
	 * then you will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup() which explicitly checks the flag, rather than asking
	 * the filesystem for its own opinion.  This means, that an update
	 * mount of the underlying filesystem to go from rdonly to rdwr
	 * will leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = (qaddr_t) um;
	getnewfsid(mp, MOUNT_UNION);

	/* Record mount point name, NUL-padding the remainder. */
	(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

	/* Build f_mntfromname as "<above>:" / "<below>:" prefix + target. */
	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	}
	len = strlen(cp);
	bcopy(cp, mp->mnt_stat.f_mntfromname, len);

	cp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	(void) copyinstr(args.target, cp, len - 1, &size);
	bzero(cp + size, len - size);

#ifdef UNION_DIAGNOSTIC
	printf("union_mount: from %s, on %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return (0);

bad:
	/* Undo everything acquired so far; um_cred is never held on
	 * any path that reaches here, so it needs no crfree. */
	if (um)
		free(um, M_UFSMNT);
	if (cred)
		crfree(cred);
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}

/*
 * VFS start.  Nothing needed here - the start routine
 * on the underlying filesystem(s) will have been called
 * when that filesystem was mounted.
 */
int
union_start(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{

	return (0);
}

/*
 * Free reference to union layer.
 *
 * Flushes the union vnodes hanging off this mount, verifies the root
 * is no longer in use, then drops the references to the upper/lower
 * target vnodes and the mounter's credentials taken in union_mount.
 * Returns 0 on success, EBUSY if the union is still active, or EINVAL
 * for a forced unmount when forcible unmounts are disabled.
 */
int
union_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct vnode *um_rootvp;
	int error;
	int freeing;
	int flags = 0;
	extern int doforce;

#ifdef UNION_DIAGNOSTIC
	printf("union_unmount(mp = %x)\n", mp);
#endif

	if (mntflags & MNT_FORCE) {
		/* union can never be rootfs so don't check for it */
		if (!doforce)
			return (EINVAL);
		flags |= FORCECLOSE;
	}

	/* Get a locked reference to the union root for the flush below. */
	if (error = union_root(mp, &um_rootvp))
		return (error);

	/*
	 * Keep flushing vnodes from the mount list.
	 * This is needed because of the un_pvp held
	 * reference to the parent vnode.
	 * If more vnodes have been freed on a given pass,
	 * then try again.  The loop will iterate at most
	 * (d) times, where (d) is the maximum tree depth
	 * in the filesystem.
	 */
	for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) {
		struct vnode *vp;
		int n;

		/* count #vnodes held on mount list */
		for (n = 0, vp = mp->mnt_vnodelist.lh_first;
				vp != NULLVP;
				vp = vp->v_mntvnodes.le_next)
			n++;

		/* if this is unchanged then stop */
		if (n == freeing)
			break;

		/* otherwise try one more time */
		freeing = n;
	}

	/* At this point the root vnode should have a single reference */
	if (um_rootvp->v_usecount > 1) {
		vput(um_rootvp);
		return (EBUSY);
	}

#ifdef UNION_DIAGNOSTIC
	vprint("union root", um_rootvp);
#endif
	/*
	 * Discard references to upper and lower target vnodes.
	 */
	if (um->um_lowervp)
		vrele(um->um_lowervp);
	vrele(um->um_uppervp);
	crfree(um->um_cred);
	/*
	 * Release reference on underlying root vnode
	 */
	vput(um_rootvp);
	/*
	 * And blow it away for future re-use
	 */
	vgone(um_rootvp);
	/*
	 * Finally, throw away the union_mount structure
	 */
	free(mp->mnt_data, M_UFSMNT);	/* XXX */
	mp->mnt_data = 0;
	return (0);
}

/*
 * Return a locked, referenced vnode for the root of the union mount
 * in *vpp.  Returns 0 on success or the error from union_allocvp.
 */
int
union_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;
	int loselock;

	/*
	 * Return locked reference to root.
	 *
	 * For a _BELOW mount whose upper vnode is already locked we
	 * must not lock it again (NOTE(review): presumably this avoids
	 * self-deadlock when the covered vnode is the upper layer --
	 * confirm against union_allocvp's locking protocol); instead
	 * the UN_ULOCK flag is cleared on the result below.
	 */
	VREF(um->um_uppervp);
	if ((um->um_op == UNMNT_BELOW) &&
	     VOP_ISLOCKED(um->um_uppervp)) {
		loselock = 1;
	} else {
		VOP_LOCK(um->um_uppervp);
		loselock = 0;
	}
	if (um->um_lowervp)
		VREF(um->um_lowervp);
	error = union_allocvp(vpp, mp,
				(struct vnode *) 0,
				(struct vnode *) 0,
				(struct componentname *) 0,
				um->um_uppervp,
				um->um_lowervp);

	if (error) {
		/* Undo the lock/references taken above. */
		if (!loselock)
			VOP_UNLOCK(um->um_uppervp);
		vrele(um->um_uppervp);
		if (um->um_lowervp)
			vrele(um->um_lowervp);
	} else {
		if (loselock)
			VTOUNION(*vpp)->un_flags &= ~UN_ULOCK;
	}

	return (error);
}

/*
 * Quotas are not supported on union mounts.
 */
int
union_quotactl(mp, cmd, uid, arg, p)
	struct mount *mp;
	int cmd;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{

	return (EOPNOTSUPP);
}

/*
 * Return filesystem statistics for the union mount by summing the
 * statistics of the lower (if any) and upper layers.  Block counts
 * from the lower layer are rescaled to the upper layer's block size
 * before being added.  Type/flags/blocksize are taken from the upper
 * layer alone.
 */
int
union_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	int error;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct statfs mstat;
	int lbsize;

#ifdef UNION_DIAGNOSTIC
	printf("union_statfs(mp = %x, lvp = %x, uvp = %x)\n", mp,
			um->um_lowervp,
			um->um_uppervp);
#endif

	/* Zeroed mstat stands in for a missing lower layer. */
	bzero(&mstat, sizeof(mstat));

	if (um->um_lowervp) {
		error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p);
		if (error)
			return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
#if 0
	sbp->f_type = mstat.f_type;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;
#endif
	lbsize = mstat.f_bsize;
	sbp->f_blocks = mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files = mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p);
	if (error)
		return (error);

	sbp->f_type = MOUNT_UNION;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;

	/*
	 * if the lower and upper blocksizes differ, then frig the
	 * block counts so that the sizes reported by df make some
	 * kind of sense.  none of this makes sense though.
	 */

	if (mstat.f_bsize != lbsize) {
		sbp->f_blocks = sbp->f_blocks * lbsize / mstat.f_bsize;
		sbp->f_bfree = sbp->f_bfree * lbsize / mstat.f_bsize;
		sbp->f_bavail = sbp->f_bavail * lbsize / mstat.f_bsize;
	}
	sbp->f_blocks += mstat.f_blocks;
	sbp->f_bfree += mstat.f_bfree;
	sbp->f_bavail += mstat.f_bavail;
	sbp->f_files += mstat.f_files;
	sbp->f_ffree += mstat.f_ffree;

	/* If caller supplied its own buffer, copy the identity fields
	 * kept in the mount structure's cached statfs. */
	if (sbp != &mp->mnt_stat) {
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	return (0);
}

int
union_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{

	/*
	 * XXX - Assumes no data cached at union layer.
	 */
	return (0);
}

/*
 * Inode-number lookup is not supported on union mounts.
 */
int
union_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

/*
 * NFS file-handle to vnode conversion is not supported (union mounts
 * cannot be NFS-exported).
 */
int
union_fhtovp(mp, fidp, nam, vpp, exflagsp, credanonp)
	struct mount *mp;
	struct fid *fidp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{

	return (EOPNOTSUPP);
}

/*
 * Vnode to NFS file-handle conversion is not supported.
 */
int
union_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int union_init __P((void));

/*
 * Operations vector for the union filesystem; entries are in the
 * order declared by struct vfsops.
 */
struct vfsops union_vfsops = {
	union_mount,
	union_start,
	union_unmount,
	union_root,
	union_quotactl,
	union_statfs,
	union_sync,
	union_vget,
	union_fhtovp,
	union_vptofh,
	union_init,
};