/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	8.28 (Berkeley) 05/19/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <miscfs/union/union.h>

/*
 * Ensure the union node's upper vnode lock is held: if UN_ULOCK is
 * not set, take the lock via union_fixup().  Multi-statement macro;
 * every call site passes simple lvalues so the double evaluation of
 * (un) is harmless here.
 */
#define FIXUP(un, p) { \
	if (((un)->un_flags & UN_ULOCK) == 0) { \
		union_fixup(un, p); \
	} \
}

/*
 * Acquire the exclusive lock on the upper vnode and record that
 * fact in un_flags (UN_ULOCK).  Caller guarantees un_uppervp != NULLVP.
 */
static void
union_fixup(un, p)
	struct union_node *un;
	struct proc *p;
{

	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
	un->un_flags |= UN_ULOCK;
}

/*
 * Do a lookup of one component (cnp) in a single layer of the union,
 * starting from *dvpp.  udvp is that layer's root vnode, used as a
 * stop marker so ".." and mount-point traversal never escape the
 * layer.  On success the resulting (referenced) vnode is returned in
 * *vpp; *dvpp may be updated if ".." crossed a mount boundary.
 */
static int
union_lookup1(udvp, dvpp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **dvpp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			VREF(dvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component.  In which case, re-lock the node
	 * here to allow it to be unlocked again (phew) in union_lookup.
	 */
	if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN))
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);

	dvp = tdvp;

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	       (mp = dvp->v_mountedhere)) {

		/* If the mount is being unmounted, retry the test. */
		if (vfs_busy(mp, 0, 0, p))
			continue;

		error = VFS_ROOT(mp, &tdvp);
		vfs_unbusy(mp, p);
		if (error) {
			vput(dvp);
			return (error);
		}

		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

/*
 * Lookup a pathname component in both layers of the union, then
 * construct (or find in the cache) a union node referencing whichever
 * layer vnodes were found.  The lower layer is consulted only when the
 * upper lookup failed and the name is not whited out.  If the name
 * exists only as a lower-layer directory, a shadow directory is
 * created in the upper layer so future operations have somewhere to go.
 */
int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	int rdonly = cnp->cn_flags & RDONLY;	/* NOTE(review): computed but never used */
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred;
	int iswhiteout;
	struct vattr va;

#ifdef notyet
	/* Disabled "..." magic name: step directly into the lower layer. */
	if (cnp->cn_namelen == 3 &&
			cnp->cn_nameptr[2] == '.' &&
			cnp->cn_nameptr[1] == '.' &&
			cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		VREF(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		if (!lockparent || !(cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ap->a_dvp, 0, p);
		return (0);
	}
#endif

	/*
	 * Force LOCKPARENT for the layer lookups; the caller's original
	 * preference (lockparent) is restored before returning.
	 */
	cnp->cn_flags |= LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level comsumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		FIXUP(dun, p);
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
					&uppervp, cnp);
		/*if (uppervp == upperdvp)
			dun->un_flags |= UN_KLOCK;*/

		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (uerror);
		}
		/*
		 * A miss in the upper layer may still hide the lower
		 * layer: either by an explicit whiteout entry, or by
		 * the parent directory being marked opaque.
		 */
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				lerror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_proc);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.   this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Below-mounts check with the mount owner's creds. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
				&lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp, 0, p);

		if (cnp->cn_consume != 0) {
			if (uppervp != NULLVP) {
				if (uppervp == upperdvp)
					vrele(uppervp);
				else
					vput(uppervp);
				uppervp = NULLVP;
			}
			*ap->a_vpp = lowervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (lerror);
		}
	} else {
		lerror = ENOENT;
		/* "..": borrow the lower vnode from the union parent. */
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
				lerror = 0;
			}
		}
	}

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			/*
			 * Drop the upper directory lock around the
			 * shadow-directory creation; union_mkshadow
			 * relocks internally via relookup.
			 */
			dun->un_flags &= ~UN_ULOCK;
			VOP_UNLOCK(upperdvp, 0, p);
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY, p);
			dun->un_flags |= UN_ULOCK;

			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				return (uerror);
			}
		}
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp, 0, p);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vput(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
	} else {
		/*
		 * Unlock the parent unless the caller asked for a
		 * locked parent on the last component.
		 */
		if (*ap->a_vpp != dvp)
			if (!lockparent || !(cnp->cn_flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
	}

	return (error);
}

/*
 * Create a file in the upper layer (only); fails with EROFS if the
 * directory has no upper vnode.  The UN_KLOCK dance transfers the
 * upper directory lock to VOP_CREATE, which consumes it.
 */
int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un, p);

		VREF(dvp);
		un->un_flags |= UN_KLOCK;	/* keep upper locked across vput */
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
				NULLVP, 1);
		if (error)
			vput(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * Create a whiteout entry in the upper layer to mask a lower-layer
 * name.  Requires an upper vnode.
 */
int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (un->un_uppervp == NULLVP)
		return (EOPNOTSUPP);

	FIXUP(un, p);
	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

/*
 * Make a device/fifo node in the upper layer; EROFS without one.
 * Note VOP_MKNOD may legitimately return no vnode (vp == NULLVP).
 */
int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp;

		FIXUP(un, p);

		VREF(dvp);
		un->un_flags |= UN_KLOCK;	/* keep upper locked across vput */
		mp = ap->a_dvp->v_mount;
		vput(ap->a_dvp);
		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
		if (error)
			return (error);

		if (vp != NULLVP) {
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
					cnp, vp, NULLVP, 1);
			if (error)
				vput(vp);
		}
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * Open the union node: prefer the upper vnode; if only a lower vnode
 * exists and the open is for writing on a regular file, copy the file
 * up first (copy-on-open), otherwise open the lower vnode directly and
 * count the open in un_openl so union_close can balance it.
 */
int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* O_TRUNC ⇒ no need to copy the old contents */
			error = union_copyup(un, (mode&O_TRUNC) == 0, cred, p);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred, p);
			return (error);
		}

		/*
		 * Just open the lower vnode
		 */
		un->un_openl++;
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_OPEN(tvp, mode, cred, p);
		VOP_UNLOCK(tvp, 0, p);

		return (error);
	}

	FIXUP(un, p);

	error = VOP_OPEN(tvp, mode, cred, p);

	return (error);
}

/*
 * Close whichever layer vnode is active.  Opens of the lower layer
 * were counted in un_openl by union_open; balance that count here.
 */
int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if (un->un_uppervp != NULLVP) {
		vp = un->un_uppervp;
	} else {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}

	return (VOP_CLOSE(vp, ap->a_fflag, ap->a_cred, ap->a_p));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error = EACCES;
	struct vnode *vp;

	if ((vp = un->un_uppervp) != NULLVP) {
		FIXUP(un, p);
		return (VOP_ACCESS(vp, ap->a_mode, ap->a_cred, p));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, p);
		if (error == 0) {
			struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

			/* below-mounts also check the mount owner's creds */
			if (um->um_op == UNMNT_BELOW)
				error = VOP_ACCESS(vp, ap->a_mode,
						um->um_cred, p);
		}
		VOP_UNLOCK(vp, 0, p);
		/* NOTE(review): redundant — falls through to the same return */
		if (error)
			return (error);
	}

	return (error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes
 */
int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;	/* NOTE(review): re-assigned identically below */
	struct proc *p = ap->a_p;
	struct vattr *vap;
	struct vattr va;


	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		/*
		 * It's not clear whether VOP_GETATTR is to be
		 * called with the vnode locked or not.  stat() calls
		 * it with (vp) locked, and fstat calls it with
		 * (vp) unlocked.
		 * In the mean time, compensate here by checking
		 * the union_node's lock flag.
		 */
		if (un->un_flags & UN_LOCKED)
			FIXUP(un, p);

		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR) {
		/* directory: also stat the lower layer into va for nlink */
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;

	/* report the union mount's fsid, not the layer's */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Set attributes: only the upper layer is writable.  Truncating a
 * lower-only regular file first forces a copyup (the O_TRUNC|O_CREAT
 * open path lands here).
 */
int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error;

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
						ap->a_cred, ap->a_p);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	if (un->un_uppervp != NULLVP) {
		FIXUP(un, p);
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_p);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
	} else {
		error = EROFS;
	}

	return (error);
}

/*
 * Read from whichever layer is active.  The lower vnode must be
 * locked around the read; the upper vnode's lock is handled by FIXUP.
 */
int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct proc *p = ap->a_uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}

	return (error);
}

/*
 * Write always goes to the upper layer; union_open/union_setattr have
 * already copied the file up, so a missing upper vnode is a panic.
 * NOTE(review): the argument struct is declared vop_read_args — the
 * layouts coincide for these members, but vop_write_args would be the
 * correct type.
 */
int
union_write(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_uio->uio_procp;

	vp = UPPERVP(ap->a_vp);
	if (vp == NULLVP)
		panic("union: missing upper layer in write");

	FIXUP(un, p);
	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}

	return (error);
}

/*
 * Pass the lease request straight through to the active layer.
 * NOTE(review): missing explicit `int` return type (relies on
 * implicit int).
 */
union_lease(ap)
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap;
{

	return (VOP_LEASE(OTHERVP(ap->a_vp), ap->a_p, ap->a_cred, ap->a_flag));
}

/*
 * Pass ioctl straight through to the active layer.
 */
int
union_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_IOCTL(OTHERVP(ap->a_vp), ap->a_command, ap->a_data,
				ap->a_fflag, ap->a_cred, ap->a_p));
}

/*
 * Pass select straight through to the active layer.
 */
int
union_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_SELECT(OTHERVP(ap->a_vp), ap->a_which, ap->a_fflags,
				ap->a_cred, ap->a_p));
}

/*
 * Revoke both layer vnodes (if present) and then this vnode itself.
 * NOTE(review): declared int but falls off the end without returning
 * a value — callers through the vop table read an indeterminate value;
 * should end with `return (0);`.
 */
int
union_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (UPPERVP(vp))
		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
	if (LOWERVP(vp))
		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
	vgone(vp);
}

/*
 * Pass mmap straight through to the active layer.
 */
int
union_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_MMAP(OTHERVP(ap->a_vp), ap->a_fflags,
				ap->a_cred, ap->a_p));
}

/*
 * Fsync the active layer vnode; locks the lower vnode around the call,
 * or takes the upper lock via FIXUP.
 */
int
union_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	int error = 0;
	struct proc *p = ap->a_p;
	struct vnode *targetvp = OTHERVP(ap->a_vp);

	if (targetvp != NULLVP) {
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p);
		else
			FIXUP(VTOUNION(ap->a_vp), p);
		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
		if (dolock)
			VOP_UNLOCK(targetvp, 0, p);
	}

	return (error);
}

/*
 * Pass seek straight through to the active layer.
 */
int
union_seek(ap)
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_cred;
	} */ *ap;
{

	return (VOP_SEEK(OTHERVP(ap->a_vp), ap->a_oldoff, ap->a_newoff, ap->a_cred));
}

/*
 * Remove a name: if the object exists in the upper layer, remove it
 * there (adding a whiteout when a lower object would otherwise show
 * through); if it exists only in the lower layer, just create a
 * whiteout to mask it.
 */
int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/*
		 * Hand locked references on both upper vnodes to
		 * VOP_REMOVE (which consumes them); UN_KLOCK keeps
		 * the upper locks alive across the vputs.
		 */
		FIXUP(dun, p);
		VREF(dvp);
		dun->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		FIXUP(un, p);
		VREF(vp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_vp);

		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
		if (!error)
			union_removed_upper(un);
	} else {
		FIXUP(dun, p);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
		vput(ap->a_dvp);
		vput(ap->a_vp);
	}

	return (error);
}

/*
 * Hard-link into the upper layer.  If the source is a lower-only
 * union node it is copied up first (juggling the shared upper
 * directory lock around union_copyup).
 */
int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_vp;
		struct vnode *a_tdvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error = 0;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct union_node *un;
	struct vnode *vp;
	struct vnode *tdvp;

	un = VTOUNION(ap->a_tdvp);

	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		/* source is not a union vnode; link it as-is */
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);
		if (tun->un_uppervp == NULLVP) {
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
			/*
			 * If the target directory IS the source's parent,
			 * drop its upper lock so copyup can relookup.
			 */
			if (un->un_uppervp == tun->un_dirvp) {
				un->un_flags &= ~UN_ULOCK;
				VOP_UNLOCK(un->un_uppervp, 0, p);
			}
			error = union_copyup(tun, 1, cnp->cn_cred, p);
			if (un->un_uppervp == tun->un_dirvp) {
				vn_lock(un->un_uppervp,
						LK_EXCLUSIVE | LK_RETRY, p);
				un->un_flags |= UN_ULOCK;
			}
			VOP_UNLOCK(ap->a_vp, 0, p);
		}
		vp = tun->un_uppervp;
	}

	tdvp = un->un_uppervp;
	if (tdvp == NULLVP)
		error = EROFS;

	if (error) {
		vput(ap->a_tdvp);
		return (error);
	}

	/* transfer a locked reference on the upper dir to VOP_LINK */
	FIXUP(un, p);
	VREF(tdvp);
	un->un_flags |= UN_KLOCK;
	vput(ap->a_tdvp);

	return (VOP_LINK(vp, tdvp, cnp));
}

/*
 * Rename entirely within the upper layer, swapping each union vnode
 * argument for its upper vnode.  A missing upper vnode on the source
 * side yields EXDEV; a lower object behind the source forces a
 * whiteout so it does not reappear.
 */
int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
			error = EXDEV;
			goto bad;
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		tdvp = un->un_uppervp;
		VREF(tdvp);
		un->un_flags |= UN_KLOCK;	/* lock passes to VOP_RENAME */
		vput(ap->a_tdvp);
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			VREF(tvp);
			un->un_flags |= UN_KLOCK;
		}
		vput(ap->a_tvp);
	}

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

bad:
	/* release everything per the VOP_RENAME failure contract */
	vrele(fdvp);
	vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP)
		vput(tvp);

	return (error);
}

/*
 * Make a directory in the upper layer; EROFS without one.  Unlike
 * create/mknod this keeps a reference on the union parent (via
 * VOP_UNLOCK + vrele rather than vput) for union_allocvp.
 */
int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;

		FIXUP(un, p);
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			vrele(ap->a_dvp);
			return (error);
		}

		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
				NULLVP, cnp, vp, NULLVP, 1);
		vrele(ap->a_dvp);
		if (error)
			vput(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * Remove a directory: mirror image of union_remove for directories —
 * remove the upper directory (whiting out if a lower one would show
 * through), or just whiteout a lower-only directory.
 */
int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		/* hand locked refs on both upper vnodes to VOP_RMDIR */
		FIXUP(dun, p);
		VREF(dvp);
		dun->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		FIXUP(un, p);
		VREF(vp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_vp);

		if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
	} else {
		FIXUP(dun, p);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
		vput(ap->a_dvp);
		vput(ap->a_vp);
	}

	return (error);
}

/*
 * Make a symlink in the upper layer; EROFS without one.  No union
 * node is built for the result (*a_vpp is returned NULLVP);
 * presumably the underlying VOP_SYMLINK consumes/releases vp —
 * TODO(review): confirm against the lower filesystem's symlink op.
 * NOTE(review): local `mp` is set but never used.
 */
int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		FIXUP(un, p);
		VREF(dvp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_dvp);
		error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target);
		*ap->a_vpp = NULLVP;
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *uvp = un->un_uppervp;
	struct proc *p = ap->a_uio->uio_procp;

	if (uvp == NULLVP)
		return (0);

	/* re-dispatch the whole op on the upper vnode */
	FIXUP(un, p);
	ap->a_vp = uvp;
	return (VOCALL(uvp->v_op, VOFFSET(vop_readdir), ap));
}

/*
 * Read the symlink from the active layer, locking the lower vnode
 * around the call when that is the active one.
 */
int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_READLINK(vp, uio, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Abort a pending directory operation on the active layer vnode,
 * matching the lock state the aborted op left behind.
 */
int
union_abortop(ap)
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;
	int dolock = (vp == LOWERVP(ap->a_dvp));

	if (islocked) {
		if (dolock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		else
			FIXUP(VTOUNION(ap->a_dvp), p);
	}
	error = VOP_ABORTOP(vp, cnp);
	if (islocked && dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Inactivate: release the cached directory vnode list, and vgone the
 * vnode if it has been dropped from the union node cache.
 */
int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

#ifdef UNION_DIAGNOSTIC
	if (un->un_flags & UN_LOCKED)
		panic("union: inactivating locked node");
	if (un->un_flags & UN_ULOCK)
		panic("union: inactivating w/locked upper node");
#endif

	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	if ((un->un_flags & UN_CACHED) == 0)
		vgone(ap->a_vp);

	return (0);
}

/*
 * Reclaim: tear down the union node (union_freevp releases the layer
 * vnode references held since allocation).
 */
int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	union_freevp(ap->a_vp);

	return (0);
}

/*
 * Lock the union node (and, when present and in use, its upper
 * vnode).  NOTE(review): this function continues beyond the end of
 * this chunk; the remainder is not visible here.
 */
int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	int flags = ap->a_flags;
	struct union_node *un;

start:
	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		/* vnode is being cleaned out; wait and bail */
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "unionlk1", 0);
		return (ENOENT);
	}
	simple_unlock(&vp->v_interlock);
	flags &= ~LK_INTERLOCK;

	un = VTOUNION(vp);

	if (un->un_uppervp != NULLVP) {
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			if (vn_lock(un->un_uppervp, flags, p))
				goto start;
			un->un_flags |= UN_ULOCK;
		}
#ifdef DIAGNOSTIC
		if (un->un_flags & UN_KLOCK) {
			vprint("union: dangling klock", vp);
			panic("union: dangling upper lock (%lx)", vp);
		}
#endif
	}

	if (un->un_flags & UN_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && un->un_pid == curproc->p_pid &&
			    un->un_pid > -1 && curproc->p_pid > -1)
			panic("union: locking against myself");
#endif
		un->un_flags |= UN_WANT;
		tsleep((caddr_t)&un->un_flags, PINOD, "unionlk2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
1376 if (curproc) 1377 un->un_pid = curproc->p_pid; 1378 else 1379 un->un_pid = -1; 1380 #endif 1381 1382 un->un_flags |= UN_LOCKED; 1383 return (0); 1384 } 1385 1386 /* 1387 * When operations want to vput() a union node yet retain a lock on 1388 * the upper vnode (say, to do some further operations like link(), 1389 * mkdir(), ...), they set UN_KLOCK on the union node, then call 1390 * vput() which calls VOP_UNLOCK() and comes here. union_unlock() 1391 * unlocks the union node (leaving the upper vnode alone), clears the 1392 * KLOCK flag, and then returns to vput(). The caller then does whatever 1393 * is left to do with the upper vnode, and ensures that it gets unlocked. 1394 * 1395 * If UN_KLOCK isn't set, then the upper vnode is unlocked here. 1396 */ 1397 int 1398 union_unlock(ap) 1399 struct vop_lock_args *ap; 1400 { 1401 struct union_node *un = VTOUNION(ap->a_vp); 1402 struct proc *p = ap->a_p; 1403 1404 #ifdef DIAGNOSTIC 1405 if ((un->un_flags & UN_LOCKED) == 0) 1406 panic("union: unlock unlocked node"); 1407 if (curproc && un->un_pid != curproc->p_pid && 1408 curproc->p_pid > -1 && un->un_pid > -1) 1409 panic("union: unlocking other process's union node"); 1410 #endif 1411 1412 un->un_flags &= ~UN_LOCKED; 1413 1414 if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK) 1415 VOP_UNLOCK(un->un_uppervp, 0, p); 1416 1417 un->un_flags &= ~(UN_ULOCK|UN_KLOCK); 1418 1419 if (un->un_flags & UN_WANT) { 1420 un->un_flags &= ~UN_WANT; 1421 wakeup((caddr_t) &un->un_flags); 1422 } 1423 1424 #ifdef DIAGNOSTIC 1425 un->un_pid = 0; 1426 #endif 1427 1428 return (0); 1429 } 1430 1431 int 1432 union_bmap(ap) 1433 struct vop_bmap_args /* { 1434 struct vnode *a_vp; 1435 daddr_t a_bn; 1436 struct vnode **a_vpp; 1437 daddr_t *a_bnp; 1438 int *a_runp; 1439 } */ *ap; 1440 { 1441 int error; 1442 struct proc *p = curproc; /* XXX */ 1443 struct vnode *vp = OTHERVP(ap->a_vp); 1444 int dolock = (vp == LOWERVP(ap->a_vp)); 1445 1446 if (dolock) 1447 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, 
p); 1448 else 1449 FIXUP(VTOUNION(ap->a_vp), p); 1450 error = VOP_BMAP(vp, ap->a_bn, ap->a_vpp, ap->a_bnp, ap->a_runp); 1451 if (dolock) 1452 VOP_UNLOCK(vp, 0, p); 1453 1454 return (error); 1455 } 1456 1457 int 1458 union_print(ap) 1459 struct vop_print_args /* { 1460 struct vnode *a_vp; 1461 } */ *ap; 1462 { 1463 struct vnode *vp = ap->a_vp; 1464 1465 printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n", 1466 vp, UPPERVP(vp), LOWERVP(vp)); 1467 if (UPPERVP(vp) != NULLVP) 1468 vprint("union: upper", UPPERVP(vp)); 1469 if (LOWERVP(vp) != NULLVP) 1470 vprint("union: lower", LOWERVP(vp)); 1471 1472 return (0); 1473 } 1474 1475 int 1476 union_islocked(ap) 1477 struct vop_islocked_args /* { 1478 struct vnode *a_vp; 1479 } */ *ap; 1480 { 1481 1482 return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0); 1483 } 1484 1485 int 1486 union_pathconf(ap) 1487 struct vop_pathconf_args /* { 1488 struct vnode *a_vp; 1489 int a_name; 1490 int *a_retval; 1491 } */ *ap; 1492 { 1493 int error; 1494 struct proc *p = curproc; /* XXX */ 1495 struct vnode *vp = OTHERVP(ap->a_vp); 1496 int dolock = (vp == LOWERVP(ap->a_vp)); 1497 1498 if (dolock) 1499 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 1500 else 1501 FIXUP(VTOUNION(ap->a_vp), p); 1502 error = VOP_PATHCONF(vp, ap->a_name, ap->a_retval); 1503 if (dolock) 1504 VOP_UNLOCK(vp, 0, p); 1505 1506 return (error); 1507 } 1508 1509 int 1510 union_advlock(ap) 1511 struct vop_advlock_args /* { 1512 struct vnode *a_vp; 1513 caddr_t a_id; 1514 int a_op; 1515 struct flock *a_fl; 1516 int a_flags; 1517 } */ *ap; 1518 { 1519 1520 return (VOP_ADVLOCK(OTHERVP(ap->a_vp), ap->a_id, ap->a_op, 1521 ap->a_fl, ap->a_flags)); 1522 } 1523 1524 1525 /* 1526 * XXX - vop_strategy must be hand coded because it has no 1527 * vnode in its arguments. 1528 * This goes away with a merged VM/buffer cache. 
1529 */ 1530 int 1531 union_strategy(ap) 1532 struct vop_strategy_args /* { 1533 struct buf *a_bp; 1534 } */ *ap; 1535 { 1536 struct buf *bp = ap->a_bp; 1537 int error; 1538 struct vnode *savedvp; 1539 1540 savedvp = bp->b_vp; 1541 bp->b_vp = OTHERVP(bp->b_vp); 1542 1543 #ifdef DIAGNOSTIC 1544 if (bp->b_vp == NULLVP) 1545 panic("union_strategy: nil vp"); 1546 if (((bp->b_flags & B_READ) == 0) && 1547 (bp->b_vp == LOWERVP(savedvp))) 1548 panic("union_strategy: writing to lowervp"); 1549 #endif 1550 1551 error = VOP_STRATEGY(bp); 1552 bp->b_vp = savedvp; 1553 1554 return (error); 1555 } 1556 1557 /* 1558 * Global vfs data structures 1559 */ 1560 int (**union_vnodeop_p)(); 1561 struct vnodeopv_entry_desc union_vnodeop_entries[] = { 1562 { &vop_default_desc, vn_default_error }, 1563 { &vop_lookup_desc, union_lookup }, /* lookup */ 1564 { &vop_create_desc, union_create }, /* create */ 1565 { &vop_whiteout_desc, union_whiteout }, /* whiteout */ 1566 { &vop_mknod_desc, union_mknod }, /* mknod */ 1567 { &vop_open_desc, union_open }, /* open */ 1568 { &vop_close_desc, union_close }, /* close */ 1569 { &vop_access_desc, union_access }, /* access */ 1570 { &vop_getattr_desc, union_getattr }, /* getattr */ 1571 { &vop_setattr_desc, union_setattr }, /* setattr */ 1572 { &vop_read_desc, union_read }, /* read */ 1573 { &vop_write_desc, union_write }, /* write */ 1574 { &vop_lease_desc, union_lease }, /* lease */ 1575 { &vop_ioctl_desc, union_ioctl }, /* ioctl */ 1576 { &vop_select_desc, union_select }, /* select */ 1577 { &vop_revoke_desc, union_revoke }, /* revoke */ 1578 { &vop_mmap_desc, union_mmap }, /* mmap */ 1579 { &vop_fsync_desc, union_fsync }, /* fsync */ 1580 { &vop_seek_desc, union_seek }, /* seek */ 1581 { &vop_remove_desc, union_remove }, /* remove */ 1582 { &vop_link_desc, union_link }, /* link */ 1583 { &vop_rename_desc, union_rename }, /* rename */ 1584 { &vop_mkdir_desc, union_mkdir }, /* mkdir */ 1585 { &vop_rmdir_desc, union_rmdir }, /* rmdir */ 1586 { 
&vop_symlink_desc, union_symlink }, /* symlink */ 1587 { &vop_readdir_desc, union_readdir }, /* readdir */ 1588 { &vop_readlink_desc, union_readlink }, /* readlink */ 1589 { &vop_abortop_desc, union_abortop }, /* abortop */ 1590 { &vop_inactive_desc, union_inactive }, /* inactive */ 1591 { &vop_reclaim_desc, union_reclaim }, /* reclaim */ 1592 { &vop_lock_desc, union_lock }, /* lock */ 1593 { &vop_unlock_desc, union_unlock }, /* unlock */ 1594 { &vop_bmap_desc, union_bmap }, /* bmap */ 1595 { &vop_strategy_desc, union_strategy }, /* strategy */ 1596 { &vop_print_desc, union_print }, /* print */ 1597 { &vop_islocked_desc, union_islocked }, /* islocked */ 1598 { &vop_pathconf_desc, union_pathconf }, /* pathconf */ 1599 { &vop_advlock_desc, union_advlock }, /* advlock */ 1600 #ifdef notdef 1601 { &vop_blkatoff_desc, union_blkatoff }, /* blkatoff */ 1602 { &vop_valloc_desc, union_valloc }, /* valloc */ 1603 { &vop_vfree_desc, union_vfree }, /* vfree */ 1604 { &vop_truncate_desc, union_truncate }, /* truncate */ 1605 { &vop_update_desc, union_update }, /* update */ 1606 { &vop_bwrite_desc, union_bwrite }, /* bwrite */ 1607 #endif 1608 { (struct vnodeop_desc*)NULL, (int(*)())NULL } 1609 }; 1610 struct vnodeopv_desc union_vnodeop_opv_desc = 1611 { &union_vnodeop_p, union_vnodeop_entries }; 1612