/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_subr.c	8.5 (Berkeley) 04/24/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <miscfs/union/union.h>

#ifdef DIAGNOSTIC
#include <sys/proc.h>
#endif

/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((unsigned long) (u)) + ((unsigned long) (l))) >> 8) & (NHASH-1))

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];

int
union_init()
{
	int i;

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
	return (0);
}

static int
union_list_lock(ix)
	int ix;
{

	if (unvplock[ix] & UN_LOCKED) {
		unvplock[ix] |= UN_WANT;
		sleep((caddr_t) &unvplock[ix], PINOD);
		return (1);
	}

	unvplock[ix] |= UN_LOCKED;

	return (0);
}

static void
union_list_unlock(ix)
	int ix;
{

	unvplock[ix] &= ~UN_LOCKED;

	if (unvplock[ix] & UN_WANT) {
		unvplock[ix] &= ~UN_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}

void
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);

	if (ohash != nhash) {
		int lhash, uhash;

		/*
		 * Ensure locking is ordered from lower to higher
		 * to avoid deadlocks.  Note that (ohash) and (nhash)
		 * themselves must not be swapped: both are needed
		 * below to move the node from its old hash chain
		 * to its new one.
		 */
		if (nhash < ohash) {
			lhash = nhash;
			uhash = ohash;
		} else {
			lhash = ohash;
			uhash = nhash;
		}

		while (union_list_lock(lhash))
			continue;

		while (union_list_lock(uhash))
			continue;

		LIST_REMOVE(un, un_cache);
		union_list_unlock(ohash);
	} else {
		while (union_list_lock(nhash))
			continue;
	}

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
			if (un->un_path) {
				free(un->un_path, M_TEMP);
				un->un_path = 0;
			}
			if (un->un_dirvp) {
				vrele(un->un_dirvp);
				un->un_dirvp = NULLVP;
			}
		}
		un->un_lowervp = lowervp;
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp)
			vrele(un->un_uppervp);

		un->un_uppervp = uppervp;
	}

	if (ohash != nhash)
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);

	union_list_unlock(nhash);
}

void
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{

	union_updatevp(un, un->un_uppervp, lowervp);
}

void
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{

	union_updatevp(un, uppervp, un->un_lowervp);
}
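/*
 * Illustrative sketch (not part of the original source): the
 * deadlock-avoidance idiom used by union_updatevp() above, restated
 * as a hypothetical helper.  When two hash-chain locks must be held
 * at once, they are always acquired in index order, so two processes
 * moving nodes between the same pair of chains cannot each hold one
 * lock while sleeping on the other.
 */
#if 0
static void
union_list_lock_pair(a, b)
	int a;
	int b;
{
	int lo = (a < b) ? a : b;
	int hi = (a < b) ? b : a;

	/* acquire the lower-indexed chain lock first */
	while (union_list_lock(lo))
		continue;
	if (hi != lo)
		while (union_list_lock(hi))
			continue;
}
#endif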
/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on hash lists.  new nodes are
 * only allocated when they cannot be found by searching these
 * lists.  entries on the lists are removed when the vfs
 * reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode(), since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;		/* parent union vnode */
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
{
	int error;
	struct union_node *un;
	struct vnode *xlowervp = NULLVP;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int hash;
	int vflag;
	int try;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	/*
	 * If the layers disagree in type the union is degenerate:
	 * ignore the lower layer, but hold on to the reference so
	 * it can be released once the node is set up.
	 */
	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	/* detect the root vnode (and aliases) */
	vflag = 0;
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			VREF(lowervp);
		}
		vflag = VROOT;
	}

loop:
	for (try = 0; try < 3; try++) {
		switch (try) {
		case 0:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, lowervp);
			break;

		case 1:
			if (uppervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, NULLVP);
			break;

		case 2:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(NULLVP, lowervp);
			break;
		}

		while (union_list_lock(hash))
			continue;

		for (un = unhead[hash].lh_first; un != 0;
		     un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			     un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			     un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vget(UNIONTOV(un), 0)) {
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);

		if (un)
			break;
	}

	if (un) {
		/*
		 * Obtain a lock on the union_node.
		 * uppervp is locked, though un->un_uppervp
		 * may not be.  this doesn't break the locking
		 * hierarchy since in the case that un->un_uppervp
		 * is not yet locked it will be vrele'd and replaced
		 * with uppervp.
		 */

		if ((dvp != NULLVP) && (uppervp == dvp)) {
			/*
			 * Access ``.'', so (un) will already
			 * be locked.  Since this process has
			 * the lock on (uppervp) no other
			 * process can hold the lock on (un).
			 */
#ifdef DIAGNOSTIC
			if ((un->un_flags & UN_LOCKED) == 0)
				panic("union: . not locked");
			else if (curproc && un->un_pid != curproc->p_pid &&
				    un->un_pid > -1 && curproc->p_pid > -1)
				panic("union: allocvp not lock owner");
#endif
		} else {
			if (un->un_flags & UN_LOCKED) {
				vrele(UNIONTOV(un));
				un->un_flags |= UN_WANT;
				sleep((caddr_t) &un->un_flags, PINOD);
				goto loop;
			}
			un->un_flags |= UN_LOCKED;

#ifdef DIAGNOSTIC
			if (curproc)
				un->un_pid = curproc->p_pid;
			else
				un->un_pid = -1;
#endif
		}

		/*
		 * At this point, the union_node is locked,
		 * un->un_uppervp may not be locked, and uppervp
		 * is locked or nil.
		 */

		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		if (un->un_uppervp) {
			un->un_flags |= UN_ULOCK;
			un->un_flags &= ~UN_KLOCK;
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP) &&
			    (lowervp->v_type == VREG)) {
				un->un_hash = cnp->cn_hash;
				un->un_path = malloc(cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				VREF(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		return (0);
	}

	/*
	 * otherwise lock the vp list while we call getnewvnode()
	 * since that can block.
	 */
	hash = UNION_HASH(uppervp, lowervp);

	if (union_list_lock(hash))
		goto loop;

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error) {
		if (uppervp) {
			if (dvp == uppervp)
				vrele(uppervp);
			else
				vput(uppervp);
		}
		if (lowervp)
			vrele(lowervp);

		goto out;
	}

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	(*vpp)->v_flag |= vflag;
	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_openl = 0;
	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;
#ifdef DIAGNOSTIC
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
#endif
	if (cnp && (lowervp != NULLVP) && (lowervp->v_type == VREG)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_hash = 0;
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	LIST_INSERT_HEAD(&unhead[hash], un, un_cache);

	if (xlowervp)
		vrele(xlowervp);

out:
	union_list_unlock(hash);

	return (error);
}
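/*
 * Illustrative sketch (not part of the original source): the shape of
 * a typical union_allocvp() call, as the union lookup code might make
 * it after resolving a name in the two layers.  The function and all
 * surrounding lookup logic are hypothetical; only the calling
 * convention follows the contract documented above.
 */
#if 0
static int
union_lookup_sketch(mp, undvp, dvp, cnp, uppervp, lowervp, vpp)
	struct mount *mp;
	struct vnode *undvp;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode *uppervp;	/* locked and referenced, or NULLVP */
	struct vnode *lowervp;	/* referenced, or NULLVP */
	struct vnode **vpp;
{

	/*
	 * On success the layer references are owned by the union_node
	 * and (*vpp) is returned referenced and locked; on failure
	 * union_allocvp() has already released them.
	 */
	return (union_allocvp(vpp, mp, undvp, dvp, cnp,
			uppervp, lowervp));
}
#endif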
int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	LIST_REMOVE(un, un_cache);

	if (un->un_uppervp)
		vrele(un->un_uppervp);
	if (un->un_lowervp)
		vrele(un->un_lowervp);
	if (un->un_dirvp)
		vrele(un->un_dirvp);
	if (un->un_path)
		free(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;

	return (0);
}
/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			/* a zero-length read means end of file */
			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}
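/*
 * Illustrative sketch (not part of the original source): the same
 * copy strategy as union_copyfile() above, restated as a userland
 * routine over file descriptors.  Like the kernel loop, it reads a
 * buffer at a time, retries short writes until the buffer is
 * drained, stops at the first error, and treats a zero-length read
 * as end of file.  The function name and buffer size are arbitrary.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>

static int
copy_fd(fromfd, tofd)
	int fromfd;
	int tofd;
{
	char buf[8192];
	ssize_t nread, nwritten, off;

	for (;;) {
		nread = read(fromfd, buf, sizeof(buf));
		if (nread == 0)
			return (0);		/* end of file */
		if (nread < 0)
			return (errno);

		/* drain the buffer, allowing for short writes */
		for (off = 0; off < nread; off += nwritten) {
			nwritten = write(tofd, buf + off, nread - off);
			if (nwritten < 0)
				return (errno);
		}
	}
}
#endif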
/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	struct componentname cn;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for.  This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 *
	 * The pathname buffer will be FREEed by VOP_MKDIR.
	 */
	cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen);
	cn.cn_pnbuf[cnp->cn_namelen] = '\0';

	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_proc = cnp->cn_proc;
	if (um->um_op == UNMNT_ABOVE)
		cn.cn_cred = cnp->cn_cred;
	else
		cn.cn_cred = um->um_cred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_namelen = cnp->cn_namelen;
	cn.cn_hash = cnp->cn_hash;
	cn.cn_consume = cnp->cn_consume;

	VREF(dvp);
	if (error = relookup(dvp, vpp, &cn))
		return (error);
	vrele(dvp);

	if (*vpp) {
		VOP_ABORTOP(dvp, &cn);
		VOP_UNLOCK(dvp);
		vrele(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	/* LEASE_CHECK: dvp is locked */
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);

	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	return (error);
}
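/*
 * Illustrative note (not part of the original source): (um_cmode)
 * carries the "mode 777 modified by umask" policy described above,
 * computed once at mount time.  For example, with the common umask
 * of 022 the shadow directory is created with mode 0755:
 *
 *	0777 & ~0022 == 0755
 */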
/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open() but it avoids calling namei().
 * the problem with calling namei() is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup() is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	struct componentname cn;

	*vpp = NULLVP;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;
	cn.cn_consume = 0;

	VREF(un->un_dirvp);
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	vrele(un->un_dirvp);

	if (vp) {
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(un->un_dirvp);
		vrele(vp);
		return (EEXIST);
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	VATTR_NULL(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
	if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap))
		return (error);

	if (error = VOP_OPEN(vp, fmode, cred, p)) {
		vput(vp);
		return (error);
	}

	vp->v_writecount++;
	*vpp = vp;
	return (0);
}

int
union_vn_close(vp, fmode, cred, p)
	struct vnode *vp;
	int fmode;
	struct ucred *cred;
	struct proc *p;
{
	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred, p));
}

void
union_removed_upper(un)
	struct union_node *un;
{
	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp);
	}

	union_newupper(un, NULLVP);
}

struct vnode *
union_lowervp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_lowervp && (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0))
			return (NULLVP);
	}

	return (un->un_lowervp);
}
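/*
 * Illustrative sketch (not part of the original source): how the
 * primitives above might combine when a lower-layer file is copied
 * to the upper layer ("copy-up").  This is a minimal sketch: the
 * function name is hypothetical, it assumes (un->un_path) and
 * (un->un_dirvp) were saved by union_allocvp(), and it assumes the
 * lower vnode is unlocked on entry; the real kernel code does more
 * lock and flag bookkeeping than shown here.
 */
#if 0
static int
union_copyup_sketch(un, p)
	struct union_node *un;
	struct proc *p;
{
	int error;
	struct vnode *lvp = un->un_lowervp;
	struct vnode *uvp;

	/* create and open the shadow file on the upper layer */
	error = union_vn_create(&uvp, un, p);
	if (error)
		return (error);

	/* union_vn_create() returns (uvp) locked; record it */
	union_newupper(un, uvp);
	un->un_flags |= UN_ULOCK;

	/* both vnodes must be locked across union_copyfile() */
	VOP_LOCK(lvp);
	error = union_copyfile(p, p->p_ucred, lvp, uvp);
	VOP_UNLOCK(lvp);

	/* drop the open and the write count taken at create time */
	(void) union_vn_close(uvp, FWRITE, p->p_ucred, p);
	return (error);
}
#endif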