/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	8.27 (Berkeley) 05/14/95
 */

/*
 * Union filesystem vnode operations.  Each operation is forwarded to
 * the upper and/or lower layer vnode as appropriate; files in the
 * lower layer are copied up to the upper layer before being modified.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <miscfs/union/union.h>

/*
 * FIXUP: ensure the upper vnode of union node (un) is locked and the
 * UN_ULOCK flag is set.  The union node itself must already be locked
 * by the caller.
 */
#define FIXUP(un, p) { \
	if (((un)->un_flags & UN_ULOCK) == 0) { \
		union_fixup(un, p); \
	} \
}

/*
 * Lock the upper vnode of (un) and record that fact in un_flags.
 */
static void
union_fixup(un, p)
	struct union_node *un;
	struct proc *p;
{

	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
	un->un_flags |= UN_ULOCK;
}

/*
 * Look up one pathname component (cnp) in a single layer, starting at
 * the locked directory *dvpp.  udvp is that layer's root, used to stop
 * ".." from climbing out of the layer.  On success the resulting
 * vnode is returned in *vpp; *dvpp may be replaced when ".." steps
 * back across a mount point.  Returns 0 or an errno from the
 * underlying VOP_LOOKUP/VFS_ROOT.
 */
static int
union_lookup1(udvp, dvpp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **dvpp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			/* take our own reference and lock on the covered vnode */
			VREF(dvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component.  In which case, re-lock the node
	 * here to allow it to be unlocked again (phew) in union_lookup.
	 */
	if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN))
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);

	dvp = tdvp;

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	       (mp = dvp->v_mountedhere)) {

		/* mount is busy (being mounted/unmounted): wait and retry */
		if (mp->mnt_flag & MNT_MLOCK) {
			mp->mnt_flag |= MNT_MWAIT;
			sleep((caddr_t) mp, PVFS);
			continue;
		}

		if (error = VFS_ROOT(mp, &tdvp)) {
			vput(dvp);
			return (error);
		}

		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

/*
 * union_lookup: look the component up in both layers.
 *	1. both lookups fail: return the upper layer's error.
 *	2. upper fails, lower succeeds:
 *	   a. lower vnode is not a directory: return a union node with
 *	      an empty upper layer over the lower vnode.
 *	   b. lower vnode is a directory: create a shadow directory in
 *	      the upper layer, then proceed as in case 3.
 *	3. upper succeeds: return a union node over whatever each
 *	   layer produced.
 * Whiteout entries and opaque upper directories prevent the lookup
 * from reaching the lower layer at all.
 */
int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	/* NOTE(review): rdonly is computed but never used below. */
	int rdonly = cnp->cn_flags & RDONLY;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred;
	int iswhiteout;
	struct vattr va;

#ifdef notyet
	/* "..." would name the lower layer directory directly */
	if (cnp->cn_namelen == 3 &&
			cnp->cn_nameptr[2] == '.' &&
			cnp->cn_nameptr[1] == '.' &&
			cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		VREF(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		if (!lockparent || !(cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ap->a_dvp, 0, p);
		return (0);
	}
#endif

	/*
	 * Force LOCKPARENT for the per-layer lookups; the caller's
	 * original preference is restored before returning.
	 */
	cnp->cn_flags |= LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		FIXUP(dun, p);
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
					&uppervp, cnp);
		/*if (uppervp == upperdvp)
			dun->un_flags |= UN_KLOCK;*/

		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (uerror);
		}
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				/* lookup hit a whiteout entry */
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				/* an opaque upper dir also hides the lower layer */
				lerror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_proc);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* lower lookups run with the mount's credentials */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
				&lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp, 0, p);

		if (cnp->cn_consume != 0) {
			/* lower layer consumed extra components: drop upper result */
			if (uppervp != NULLVP) {
				if (uppervp == upperdvp)
					vrele(uppervp);
				else
					vput(uppervp);
				uppervp = NULLVP;
			}
			*ap->a_vpp = lowervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (lerror);
		}
	} else {
		lerror = ENOENT;
		/* ".." of a node with no lower dvp: use parent's lower vnode */
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
				lerror = 0;
			}
		}
	}

	/* restore the caller's LOCKPARENT preference */
	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * Drop the upper dir lock across the shadow
			 * directory creation, then re-take it.
			 */
			dun->un_flags &= ~UN_ULOCK;
			VOP_UNLOCK(upperdvp, 0, p);
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY, p);
			dun->un_flags |= UN_ULOCK;

			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				return (uerror);
			}
		}
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp, 0, p);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vput(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
	} else {
		if (*ap->a_vpp != dvp)
			if (!lockparent || !(cnp->cn_flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
	}

	return (error);
}

/*
 * union_create: create a file in the upper layer (the lower layer is
 * never written).  Fails with EROFS when there is no upper directory.
 */
int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un, p);

		/*
		 * UN_KLOCK keeps the upper vnode's lock alive across
		 * the vput of the union directory; VOP_CREATE consumes
		 * the reference and lock on dvp.
		 */
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
				NULLVP, 1);
		if (error)
			vput(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * union_whiteout: create a whiteout entry in the upper layer to mask
 * a name in the lower layer.
 */
int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	/* whiteouts can only live in the upper layer */
	if (un->un_uppervp == NULLVP)
		return (EOPNOTSUPP);

	FIXUP(un, p);
	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

/*
 * union_mknod: make a device/fifo node in the upper layer only.
 * Fails with EROFS when there is no upper directory.
 */
int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un, p);

		/*
		 * As in union_create: keep the upper vnode locked
		 * (UN_KLOCK) across the vput of the union directory;
		 * VOP_MKNOD consumes the reference and lock on dvp.
		 */
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		/* some filesystems return no vnode from mknod */
		if (vp != NULLVP) {
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
					cnp, vp, NULLVP, 1);
			if (error)
				vput(vp);
		}
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * union_open: open the upper vnode if present; otherwise open the
 * lower vnode, first copying it up to the upper layer when the open
 * is for writing on a regular file.
 */
int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* skip copying the data when O_TRUNC discards it anyway */
			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, p);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred, p);
			return (error);
		}

		/*
		 * Just open the lower vnode, keeping a count of lower
		 * opens (un_openl) so close is routed to the same layer.
		 */
		un->un_openl++;
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_OPEN(tvp, mode, cred, p);
		VOP_UNLOCK(tvp, 0, p);

		return (error);
	}

	FIXUP(un, p);

	error = VOP_OPEN(tvp, mode, cred, p);

	return (error);
}

/*
 * union_close: route the close to the layer the file was opened in,
 * using un_openl to balance opens of the lower vnode.
 */
int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if (un->un_uppervp != NULLVP) {
		vp = un->un_uppervp;
	} else {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}

	return (VOP_CLOSE(vp, ap->a_fflag, ap->a_cred, ap->a_p));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error = EACCES;
	struct vnode *vp;

	if ((vp = un->un_uppervp) != NULLVP) {
		FIXUP(un, p);
		return (VOP_ACCESS(vp, ap->a_mode, ap->a_cred, p));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, p);
		if (error == 0) {
			struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

			/*
			 * For "below" mounts, the mount credentials
			 * must also grant access to the lower vnode.
			 */
			if (um->um_op == UNMNT_BELOW)
				error = VOP_ACCESS(vp, ap->a_mode,
						um->um_cred, p);
		}
		VOP_UNLOCK(vp, 0, p);
		if (error)
			return (error);
	}

	return (error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes
 */
int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;
	struct proc *p = ap->a_p;
	struct vattr *vap;
	struct vattr va;


	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		/*
		 * It's not clear whether VOP_GETATTR is to be
		 * called with the vnode locked or not.  stat() calls
		 * it with (vp) locked, and fstat calls it with
		 * (vp) unlocked.
		 * In the mean time, compensate here by checking
		 * the union_node's lock flag.
		 */
		if (un->un_flags & UN_LOCKED)
			FIXUP(un, p);

		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		/* no upper layer: attributes come from the lower vnode */
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR) {
		/* directory: also fetch lower attrs (into va) for nlink fixup */
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	/* merge the two layers' directory link counts (see comment above) */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * union_setattr: apply attribute changes to the upper layer only,
 * copying a lower regular file up first so truncation on open
 * (O_TRUNC|O_CREAT) works.  EROFS when there is no upper vnode.
 */
int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error;

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		/* only copy the data across when not truncating to zero */
		error = union_copyup(un, (ap->a_vap->va_size != 0),
				ap->a_cred, ap->a_p);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	if (un->un_uppervp != NULLVP) {
		FIXUP(un, p);
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_p);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
	} else {
		error = EROFS;
	}

	return (error);
}

/*
 * union_read: read from whichever layer backs the union node,
 * locking the lower vnode for the duration when it is used.
 */
int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct proc *p = ap->a_uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}

	return (error);
}

/*
 * union_write: writes always go to the upper vnode (the file must
 * already have been copied up by open/setattr).
 *
 * NOTE(review): declared with vop_read_args; the read and write arg
 * structures have the same member layout here, but vop_write_args
 * would be the accurate type -- confirm before changing.
 */
int
union_write(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_uio->uio_procp;

	vp = UPPERVP(ap->a_vp);
	if (vp == NULLVP)
		panic("union: missing upper layer in write");

	FIXUP(un, p);
	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
746 */ 747 if (error == 0) { 748 off_t cur = ap->a_uio->uio_offset; 749 750 if (cur > un->un_uppersz) 751 union_newsize(ap->a_vp, cur, VNOVAL); 752 } 753 754 return (error); 755 } 756 757 union_lease(ap) 758 struct vop_lease_args /* { 759 struct vnode *a_vp; 760 struct proc *a_p; 761 struct ucred *a_cred; 762 int a_flag; 763 } */ *ap; 764 { 765 766 return (VOP_LEASE(OTHERVP(ap->a_vp), ap->a_p, ap->a_cred, ap->a_flag)); 767 } 768 769 int 770 union_ioctl(ap) 771 struct vop_ioctl_args /* { 772 struct vnode *a_vp; 773 int a_command; 774 caddr_t a_data; 775 int a_fflag; 776 struct ucred *a_cred; 777 struct proc *a_p; 778 } */ *ap; 779 { 780 781 return (VOP_IOCTL(OTHERVP(ap->a_vp), ap->a_command, ap->a_data, 782 ap->a_fflag, ap->a_cred, ap->a_p)); 783 } 784 785 int 786 union_select(ap) 787 struct vop_select_args /* { 788 struct vnode *a_vp; 789 int a_which; 790 int a_fflags; 791 struct ucred *a_cred; 792 struct proc *a_p; 793 } */ *ap; 794 { 795 796 return (VOP_SELECT(OTHERVP(ap->a_vp), ap->a_which, ap->a_fflags, 797 ap->a_cred, ap->a_p)); 798 } 799 800 int 801 union_revoke(ap) 802 struct vop_revoke_args /* { 803 struct vnode *a_vp; 804 int a_flags; 805 struct proc *a_p; 806 } */ *ap; 807 { 808 struct vnode *vp = ap->a_vp; 809 810 if (UPPERVP(vp)) 811 VOP_REVOKE(UPPERVP(vp), ap->a_flags); 812 if (LOWERVP(vp)) 813 VOP_REVOKE(LOWERVP(vp), ap->a_flags); 814 vgone(vp); 815 } 816 817 int 818 union_mmap(ap) 819 struct vop_mmap_args /* { 820 struct vnode *a_vp; 821 int a_fflags; 822 struct ucred *a_cred; 823 struct proc *a_p; 824 } */ *ap; 825 { 826 827 return (VOP_MMAP(OTHERVP(ap->a_vp), ap->a_fflags, 828 ap->a_cred, ap->a_p)); 829 } 830 831 int 832 union_fsync(ap) 833 struct vop_fsync_args /* { 834 struct vnode *a_vp; 835 struct ucred *a_cred; 836 int a_waitfor; 837 struct proc *a_p; 838 } */ *ap; 839 { 840 int error = 0; 841 struct proc *p = ap->a_p; 842 struct vnode *targetvp = OTHERVP(ap->a_vp); 843 844 if (targetvp != NULLVP) { 845 int dolock = (targetvp == 
LOWERVP(ap->a_vp)); 846 847 if (dolock) 848 vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p); 849 else 850 FIXUP(VTOUNION(ap->a_vp), p); 851 error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p); 852 if (dolock) 853 VOP_UNLOCK(targetvp, 0, p); 854 } 855 856 return (error); 857 } 858 859 int 860 union_seek(ap) 861 struct vop_seek_args /* { 862 struct vnode *a_vp; 863 off_t a_oldoff; 864 off_t a_newoff; 865 struct ucred *a_cred; 866 } */ *ap; 867 { 868 869 return (VOP_SEEK(OTHERVP(ap->a_vp), ap->a_oldoff, ap->a_newoff, ap->a_cred)); 870 } 871 872 int 873 union_remove(ap) 874 struct vop_remove_args /* { 875 struct vnode *a_dvp; 876 struct vnode *a_vp; 877 struct componentname *a_cnp; 878 } */ *ap; 879 { 880 int error; 881 struct union_node *dun = VTOUNION(ap->a_dvp); 882 struct union_node *un = VTOUNION(ap->a_vp); 883 struct componentname *cnp = ap->a_cnp; 884 struct proc *p = cnp->cn_proc; 885 886 if (dun->un_uppervp == NULLVP) 887 panic("union remove: null upper vnode"); 888 889 if (un->un_uppervp != NULLVP) { 890 struct vnode *dvp = dun->un_uppervp; 891 struct vnode *vp = un->un_uppervp; 892 893 FIXUP(dun, p); 894 VREF(dvp); 895 dun->un_flags |= UN_KLOCK; 896 vput(ap->a_dvp); 897 FIXUP(un, p); 898 VREF(vp); 899 un->un_flags |= UN_KLOCK; 900 vput(ap->a_vp); 901 902 if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc)) 903 cnp->cn_flags |= DOWHITEOUT; 904 error = VOP_REMOVE(dvp, vp, cnp); 905 if (!error) 906 union_removed_upper(un); 907 } else { 908 FIXUP(dun, p); 909 error = union_mkwhiteout( 910 MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount), 911 dun->un_uppervp, ap->a_cnp, un->un_path); 912 vput(ap->a_dvp); 913 vput(ap->a_vp); 914 } 915 916 return (error); 917 } 918 919 int 920 union_link(ap) 921 struct vop_link_args /* { 922 struct vnode *a_vp; 923 struct vnode *a_tdvp; 924 struct componentname *a_cnp; 925 } */ *ap; 926 { 927 int error = 0; 928 struct componentname *cnp = ap->a_cnp; 929 struct proc *p = cnp->cn_proc; 930 struct union_node *un; 931 struct vnode *vp; 
932 struct vnode *tdvp; 933 934 un = VTOUNION(ap->a_tdvp); 935 936 if (ap->a_tdvp->v_op != ap->a_vp->v_op) { 937 vp = ap->a_vp; 938 } else { 939 struct union_node *tun = VTOUNION(ap->a_vp); 940 if (tun->un_uppervp == NULLVP) { 941 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p); 942 if (un->un_uppervp == tun->un_dirvp) { 943 un->un_flags &= ~UN_ULOCK; 944 VOP_UNLOCK(un->un_uppervp, 0, p); 945 } 946 error = union_copyup(tun, 1, cnp->cn_cred, p); 947 if (un->un_uppervp == tun->un_dirvp) { 948 vn_lock(un->un_uppervp, 949 LK_EXCLUSIVE | LK_RETRY, p); 950 un->un_flags |= UN_ULOCK; 951 } 952 VOP_UNLOCK(ap->a_vp, 0, p); 953 } 954 vp = tun->un_uppervp; 955 } 956 957 tdvp = un->un_uppervp; 958 if (tdvp == NULLVP) 959 error = EROFS; 960 961 if (error) { 962 vput(ap->a_tdvp); 963 return (error); 964 } 965 966 FIXUP(un, p); 967 VREF(tdvp); 968 un->un_flags |= UN_KLOCK; 969 vput(ap->a_tdvp); 970 971 return (VOP_LINK(vp, tdvp, cnp)); 972 } 973 974 int 975 union_rename(ap) 976 struct vop_rename_args /* { 977 struct vnode *a_fdvp; 978 struct vnode *a_fvp; 979 struct componentname *a_fcnp; 980 struct vnode *a_tdvp; 981 struct vnode *a_tvp; 982 struct componentname *a_tcnp; 983 } */ *ap; 984 { 985 int error; 986 987 struct vnode *fdvp = ap->a_fdvp; 988 struct vnode *fvp = ap->a_fvp; 989 struct vnode *tdvp = ap->a_tdvp; 990 struct vnode *tvp = ap->a_tvp; 991 992 if (fdvp->v_op == union_vnodeop_p) { /* always true */ 993 struct union_node *un = VTOUNION(fdvp); 994 if (un->un_uppervp == NULLVP) { 995 /* 996 * this should never happen in normal 997 * operation but might if there was 998 * a problem creating the top-level shadow 999 * directory. 
1000 */ 1001 error = EXDEV; 1002 goto bad; 1003 } 1004 1005 fdvp = un->un_uppervp; 1006 VREF(fdvp); 1007 vrele(ap->a_fdvp); 1008 } 1009 1010 if (fvp->v_op == union_vnodeop_p) { /* always true */ 1011 struct union_node *un = VTOUNION(fvp); 1012 if (un->un_uppervp == NULLVP) { 1013 /* XXX: should do a copyup */ 1014 error = EXDEV; 1015 goto bad; 1016 } 1017 1018 if (un->un_lowervp != NULLVP) 1019 ap->a_fcnp->cn_flags |= DOWHITEOUT; 1020 1021 fvp = un->un_uppervp; 1022 VREF(fvp); 1023 vrele(ap->a_fvp); 1024 } 1025 1026 if (tdvp->v_op == union_vnodeop_p) { 1027 struct union_node *un = VTOUNION(tdvp); 1028 if (un->un_uppervp == NULLVP) { 1029 /* 1030 * this should never happen in normal 1031 * operation but might if there was 1032 * a problem creating the top-level shadow 1033 * directory. 1034 */ 1035 error = EXDEV; 1036 goto bad; 1037 } 1038 1039 tdvp = un->un_uppervp; 1040 VREF(tdvp); 1041 un->un_flags |= UN_KLOCK; 1042 vput(ap->a_tdvp); 1043 } 1044 1045 if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) { 1046 struct union_node *un = VTOUNION(tvp); 1047 1048 tvp = un->un_uppervp; 1049 if (tvp != NULLVP) { 1050 VREF(tvp); 1051 un->un_flags |= UN_KLOCK; 1052 } 1053 vput(ap->a_tvp); 1054 } 1055 1056 return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp)); 1057 1058 bad: 1059 vrele(fdvp); 1060 vrele(fvp); 1061 vput(tdvp); 1062 if (tvp != NULLVP) 1063 vput(tvp); 1064 1065 return (error); 1066 } 1067 1068 int 1069 union_mkdir(ap) 1070 struct vop_mkdir_args /* { 1071 struct vnode *a_dvp; 1072 struct vnode **a_vpp; 1073 struct componentname *a_cnp; 1074 struct vattr *a_vap; 1075 } */ *ap; 1076 { 1077 struct union_node *un = VTOUNION(ap->a_dvp); 1078 struct vnode *dvp = un->un_uppervp; 1079 struct componentname *cnp = ap->a_cnp; 1080 struct proc *p = cnp->cn_proc; 1081 1082 if (dvp != NULLVP) { 1083 int error; 1084 struct vnode *vp; 1085 1086 FIXUP(un, p); 1087 VREF(dvp); 1088 un->un_flags |= UN_KLOCK; 1089 VOP_UNLOCK(ap->a_dvp, 0, p); 1090 error = VOP_MKDIR(dvp, 
&vp, cnp, ap->a_vap); 1091 if (error) { 1092 vrele(ap->a_dvp); 1093 return (error); 1094 } 1095 1096 error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp, 1097 NULLVP, cnp, vp, NULLVP, 1); 1098 vrele(ap->a_dvp); 1099 if (error) 1100 vput(vp); 1101 return (error); 1102 } 1103 1104 vput(ap->a_dvp); 1105 return (EROFS); 1106 } 1107 1108 int 1109 union_rmdir(ap) 1110 struct vop_rmdir_args /* { 1111 struct vnode *a_dvp; 1112 struct vnode *a_vp; 1113 struct componentname *a_cnp; 1114 } */ *ap; 1115 { 1116 int error; 1117 struct union_node *dun = VTOUNION(ap->a_dvp); 1118 struct union_node *un = VTOUNION(ap->a_vp); 1119 struct componentname *cnp = ap->a_cnp; 1120 struct proc *p = cnp->cn_proc; 1121 1122 if (dun->un_uppervp == NULLVP) 1123 panic("union rmdir: null upper vnode"); 1124 1125 if (un->un_uppervp != NULLVP) { 1126 struct vnode *dvp = dun->un_uppervp; 1127 struct vnode *vp = un->un_uppervp; 1128 1129 FIXUP(dun, p); 1130 VREF(dvp); 1131 dun->un_flags |= UN_KLOCK; 1132 vput(ap->a_dvp); 1133 FIXUP(un, p); 1134 VREF(vp); 1135 un->un_flags |= UN_KLOCK; 1136 vput(ap->a_vp); 1137 1138 if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc)) 1139 cnp->cn_flags |= DOWHITEOUT; 1140 error = VOP_RMDIR(dvp, vp, ap->a_cnp); 1141 if (!error) 1142 union_removed_upper(un); 1143 } else { 1144 FIXUP(dun, p); 1145 error = union_mkwhiteout( 1146 MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount), 1147 dun->un_uppervp, ap->a_cnp, un->un_path); 1148 vput(ap->a_dvp); 1149 vput(ap->a_vp); 1150 } 1151 1152 return (error); 1153 } 1154 1155 int 1156 union_symlink(ap) 1157 struct vop_symlink_args /* { 1158 struct vnode *a_dvp; 1159 struct vnode **a_vpp; 1160 struct componentname *a_cnp; 1161 struct vattr *a_vap; 1162 char *a_target; 1163 } */ *ap; 1164 { 1165 struct union_node *un = VTOUNION(ap->a_dvp); 1166 struct vnode *dvp = un->un_uppervp; 1167 struct componentname *cnp = ap->a_cnp; 1168 struct proc *p = cnp->cn_proc; 1169 1170 if (dvp != NULLVP) { 1171 int error; 1172 struct vnode *vp; 
1173 struct mount *mp = ap->a_dvp->v_mount; 1174 1175 FIXUP(un, p); 1176 VREF(dvp); 1177 un->un_flags |= UN_KLOCK; 1178 vput(ap->a_dvp); 1179 error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target); 1180 *ap->a_vpp = NULLVP; 1181 return (error); 1182 } 1183 1184 vput(ap->a_dvp); 1185 return (EROFS); 1186 } 1187 1188 /* 1189 * union_readdir works in concert with getdirentries and 1190 * readdir(3) to provide a list of entries in the unioned 1191 * directories. getdirentries is responsible for walking 1192 * down the union stack. readdir(3) is responsible for 1193 * eliminating duplicate names from the returned data stream. 1194 */ 1195 int 1196 union_readdir(ap) 1197 struct vop_readdir_args /* { 1198 struct vnodeop_desc *a_desc; 1199 struct vnode *a_vp; 1200 struct uio *a_uio; 1201 struct ucred *a_cred; 1202 int *a_eofflag; 1203 u_long *a_cookies; 1204 int a_ncookies; 1205 } */ *ap; 1206 { 1207 struct union_node *un = VTOUNION(ap->a_vp); 1208 struct vnode *uvp = un->un_uppervp; 1209 struct proc *p = ap->a_uio->uio_procp; 1210 1211 if (uvp == NULLVP) 1212 return (0); 1213 1214 FIXUP(un, p); 1215 ap->a_vp = uvp; 1216 return (VOCALL(uvp->v_op, VOFFSET(vop_readdir), ap)); 1217 } 1218 1219 int 1220 union_readlink(ap) 1221 struct vop_readlink_args /* { 1222 struct vnode *a_vp; 1223 struct uio *a_uio; 1224 struct ucred *a_cred; 1225 } */ *ap; 1226 { 1227 int error; 1228 struct uio *uio = ap->a_uio; 1229 struct proc *p = uio->uio_procp; 1230 struct vnode *vp = OTHERVP(ap->a_vp); 1231 int dolock = (vp == LOWERVP(ap->a_vp)); 1232 1233 if (dolock) 1234 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 1235 else 1236 FIXUP(VTOUNION(ap->a_vp), p); 1237 error = VOP_READLINK(vp, uio, ap->a_cred); 1238 if (dolock) 1239 VOP_UNLOCK(vp, 0, p); 1240 1241 return (error); 1242 } 1243 1244 int 1245 union_abortop(ap) 1246 struct vop_abortop_args /* { 1247 struct vnode *a_dvp; 1248 struct componentname *a_cnp; 1249 } */ *ap; 1250 { 1251 int error; 1252 struct componentname *cnp = ap->a_cnp; 1253 
	struct proc *p = cnp->cn_proc;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;
	int dolock = (vp == LOWERVP(ap->a_dvp));

	/*
	 * Relock the backing vnode only when the union node itself is
	 * locked: the lower vnode directly, the upper one via FIXUP.
	 */
	if (islocked) {
		if (dolock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		else
			FIXUP(VTOUNION(ap->a_dvp), p);
	}
	error = VOP_ABORTOP(vp, cnp);
	/* Only an explicitly taken lower lock is dropped here. */
	if (islocked && dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Inactivate a union node.  Deliberately does NOT touch the lower
 * vnode; see the comment below.
 */
int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

#ifdef UNION_DIAGNOSTIC
	/* An inactive vnode should hold neither the union nor upper lock. */
	if (un->un_flags & UN_LOCKED)
		panic("union: inactivating locked node");
	if (un->un_flags & UN_ULOCK)
		panic("union: inactivating w/locked upper node");
#endif

	/* Release the cached directory vnode array, if one was built. */
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	/* Uncached nodes are torn down immediately. */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(ap->a_vp);

	return (0);
}

/*
 * Reclaim the vnode: union_freevp tears down the union node
 * (per the comment in union_inactive, the lowervp reference that
 * was retained at inactivation is presumably dropped there --
 * confirm in union_freevp).
 */
int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	union_freevp(ap->a_vp);

	return (0);
}

/*
 * Lock a union node, taking the upper vnode's lock first when an
 * upper layer exists (tracked by UN_ULOCK).  Sleeps and retries if
 * another process holds the union lock (UN_LOCKED/UN_WANT protocol,
 * see union_unlock for the wakeup side).
 */
int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	int flags = ap->a_flags;
	struct union_node *un;

start:
	/* Standard revocation check: bail out if the vnode is being vgone'd. */
	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "unionlk1", 0);
		return (ENOENT);
	}
	simple_unlock(&vp->v_interlock);
	flags &= ~LK_INTERLOCK;

	un = VTOUNION(vp);

	if (un->un_uppervp != NULLVP) {
		/* Acquire the upper vnode's lock before the union lock. */
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			if (vn_lock(un->un_uppervp, flags, p))
				goto start;
			un->un_flags |= UN_ULOCK;
		}
#ifdef DIAGNOSTIC
		/* UN_KLOCK must never survive past union_unlock. */
		if (un->un_flags & UN_KLOCK) {
			vprint("union: dangling klock", vp);
			panic("union: dangling upper lock (%lx)", vp);
		}
#endif
	}

	if (un->un_flags & UN_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && un->un_pid == curproc->p_pid &&
		    un->un_pid > -1 && curproc->p_pid > -1)
			panic("union: locking against myself");
#endif
		/* Sleep until the holder wakes us in union_unlock, then retry. */
		un->un_flags |= UN_WANT;
		tsleep((caddr_t)&un->un_flags, PINOD, "unionlk2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
	/* Record the owning pid for the self-deadlock diagnostics above. */
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
#endif

	un->un_flags |= UN_LOCKED;
	return (0);
}

/*
 * When operations want to vput() a union node yet retain a lock on
 * the upper vnode (say, to do some further operations like link(),
 * mkdir(), ...), they set UN_KLOCK on the union node, then call
 * vput() which calls VOP_UNLOCK() and comes here. union_unlock()
 * unlocks the union node (leaving the upper vnode alone), clears the
 * KLOCK flag, and then returns to vput(). The caller then does whatever
 * is left to do with the upper vnode, and ensures that it gets unlocked.
 *
 * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
 */
int
union_unlock(ap)
	struct vop_lock_args *ap;	/* NOTE(review): declared as vop_lock_args; layout-compatible with the unlock args -- confirm */
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;

#ifdef DIAGNOSTIC
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (curproc && un->un_pid != curproc->p_pid &&
	    curproc->p_pid > -1 && un->un_pid > -1)
		panic("union: unlocking other process's union node");
#endif

	un->un_flags &= ~UN_LOCKED;

	/* Drop the upper vnode's lock unless the caller keeps it (KLOCK). */
	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
		VOP_UNLOCK(un->un_uppervp, 0, p);

	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

	/* Wake any process sleeping in union_lock. */
	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#ifdef DIAGNOSTIC
	un->un_pid = 0;
#endif

	return (0);
}

/*
 * Map a logical block number by passing the call through to the
 * layer vnode that backs this union node.
 */
int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{
	int error;
	struct proc *p = curproc;		/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	/* Lower vnode needs an explicit lock; upper is relocked via FIXUP. */
	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY,
		    p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_BMAP(vp, ap->a_bn, ap->a_vpp, ap->a_bnp, ap->a_runp);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Print the union node and both of its layer vnodes (debugging).
 */
int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/* NOTE(review): pointers formatted with %x -- assumes pointer fits in int. */
	printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
			vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

/*
 * Report the union node's own lock state (UN_LOCKED), not the
 * lock state of the layer vnodes.
 */
int
union_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

/*
 * Pathconf query: forward to the backing layer vnode, with the
 * usual lower-lock / upper-FIXUP locking dance.
 */
int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct proc *p = curproc;		/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_PATHCONF(vp, ap->a_name, ap->a_retval);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Advisory locking applies directly to the backing layer vnode;
 * no union-level locking is done here.
 */
int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{

	return (VOP_ADVLOCK(OTHERVP(ap->a_vp), ap->a_id, ap->a_op,
				ap->a_fl, ap->a_flags));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	/* Redirect the buffer at the backing layer vnode for the I/O... */
	savedvp = bp->b_vp;
	bp->b_vp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (bp->b_vp == NULLVP)
		panic("union_strategy: nil vp");
	/* Writes must never be directed at the lower layer. */
	if (((bp->b_flags & B_READ) == 0) &&
		(bp->b_vp == LOWERVP(savedvp)))
		panic("union_strategy: writing to lowervp");
#endif

	error = VOP_STRATEGY(bp);
	/* ...and restore the union vnode once the call returns. */
	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)();
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_whiteout_desc, union_whiteout },		/* whiteout */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_lease_desc, union_lease },		/* lease */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_select_desc, union_select },		/* select */
	{ &vop_revoke_desc, union_revoke },		/* revoke */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{
	  &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
#ifdef notdef
	/* Operations not implemented by the union filesystem. */
	{ &vop_blkatoff_desc, union_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, union_valloc },		/* valloc */
	{ &vop_vfree_desc, union_vfree },		/* vfree */
	{ &vop_truncate_desc, union_truncate },		/* truncate */
	{ &vop_update_desc, union_update },		/* update */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
#endif
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }	/* terminator */
};
/* Binds the operations vector pointer to its entry table
 * (presumably consumed during vfs initialization -- confirm). */
struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };