/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_subr.c	8.6 (Berkeley) 04/28/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <miscfs/union/union.h>

#ifdef DIAGNOSTIC
#include <sys/proc.h>
#endif

/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1))
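
/*
 * Illustration (not part of the original source): since NHASH is a
 * power of two, the "& (NHASH-1)" above is a cheap "% NHASH".  For
 * two hypothetical vnode addresses:
 *
 *	UNION_HASH(0xf0123400, 0xf0456800)
 *	    == ((0xf0123400 + 0xf0456800) >> 8) & 0x1f
 *	    == 0x1c
 *
 * The ">> 8" discards the low-order address bits, which vary little
 * between similarly-aligned vnode allocations.
 */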

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];

int
union_init()
{
	int i;

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
	return (0);
}

static int
union_list_lock(ix)
	int ix;
{

	if (unvplock[ix] & UN_LOCKED) {
		unvplock[ix] |= UN_WANT;
		sleep((caddr_t) &unvplock[ix], PINOD);
		return (1);
	}

	unvplock[ix] |= UN_LOCKED;

	return (0);
}

static void
union_list_unlock(ix)
	int ix;
{

	unvplock[ix] &= ~UN_LOCKED;

	if (unvplock[ix] & UN_WANT) {
		unvplock[ix] &= ~UN_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}
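
/*
 * Illustration (not part of the original source): union_list_lock()
 * returns 1 if it had to sleep, in which case the hash chain may have
 * changed and the caller must re-evaluate before proceeding.  Hence
 * the idiom used throughout this file:
 *
 *	while (union_list_lock(hash))
 *		continue;
 *	...the chain unhead[hash] is now stable...
 *	union_list_unlock(hash);
 */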

void
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	int lhash, hhash;

	/*
	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.  Only the lock acquisition
	 * order is taken from the hash values; ohash and
	 * nhash keep their old/new meaning so that the node
	 * is re-inserted on the correct (new) chain below.
	 */
	if (nhash < ohash) {
		lhash = nhash;
		hhash = ohash;
	} else {
		lhash = ohash;
		hhash = nhash;
	}

	if (lhash != hhash)
		while (union_list_lock(lhash))
			continue;

	while (union_list_lock(hhash))
		continue;

	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			LIST_REMOVE(un, un_cache);
			un->un_flags &= ~UN_CACHED;
		}
	}

	if (ohash != nhash)
		union_list_unlock(ohash);

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
			if (un->un_path) {
				free(un->un_path, M_TEMP);
				un->un_path = 0;
			}
			if (un->un_dirvp) {
				vrele(un->un_dirvp);
				un->un_dirvp = NULLVP;
			}
		}
		un->un_lowervp = lowervp;
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp)
			vrele(un->un_uppervp);

		un->un_uppervp = uppervp;
	}

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	union_list_unlock(nhash);
}

void
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{

	union_updatevp(un, un->un_uppervp, lowervp);
}

void
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{

	union_updatevp(un, uppervp, un->un_lowervp);
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode(), since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = NULLVP;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int hash;
	int vflag;
	int try;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	/* detect the root vnode (and aliases) */
	vflag = 0;
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			VREF(lowervp);
		}
		vflag = VROOT;
	}

loop:
	for (try = 0; try < 3; try++) {
		switch (try) {
		case 0:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, lowervp);
			break;

		case 1:
			if (uppervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, NULLVP);
			break;

		case 2:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(NULLVP, lowervp);
			break;
		}

		while (union_list_lock(hash))
			continue;

		for (un = unhead[hash].lh_first; un != 0;
					un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			     un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			     un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vget(UNIONTOV(un), 0)) {
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);

		if (un)
			break;
	}

	if (un) {
		/*
		 * Obtain a lock on the union_node.
		 * uppervp is locked, though un->un_uppervp
		 * may not be.  This doesn't break the locking
		 * hierarchy since, in the case that un->un_uppervp
		 * is not yet locked, it will be vrele'd and replaced
		 * with uppervp.
		 */

		if ((dvp != NULLVP) && (uppervp == dvp)) {
			/*
			 * Access ``.'', so (un) will already
			 * be locked.  Since this process has
			 * the lock on (uppervp) no other
			 * process can hold the lock on (un).
			 */
#ifdef DIAGNOSTIC
			if ((un->un_flags & UN_LOCKED) == 0)
				panic("union: . not locked");
			else if (curproc && un->un_pid != curproc->p_pid &&
				    un->un_pid > -1 && curproc->p_pid > -1)
				panic("union: allocvp not lock owner");
#endif
		} else {
			if (un->un_flags & UN_LOCKED) {
				vrele(UNIONTOV(un));
				un->un_flags |= UN_WANT;
				sleep((caddr_t) &un->un_flags, PINOD);
				goto loop;
			}
			un->un_flags |= UN_LOCKED;

#ifdef DIAGNOSTIC
			if (curproc)
				un->un_pid = curproc->p_pid;
			else
				un->un_pid = -1;
#endif
		}

		/*
		 * At this point, the union_node is locked,
		 * un->un_uppervp may not be locked, and uppervp
		 * is locked or nil.
		 */

		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		if (un->un_uppervp) {
			un->un_flags |= UN_ULOCK;
			un->un_flags &= ~UN_KLOCK;
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP) &&
			    (lowervp->v_type == VREG)) {
				un->un_hash = cnp->cn_hash;
				un->un_path = malloc(cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				VREF(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		return (0);
	}

	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	hash = UNION_HASH(uppervp, lowervp);

	if (union_list_lock(hash))
		goto loop;

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error) {
		if (uppervp) {
			if (dvp == uppervp)
				vrele(uppervp);
			else
				vput(uppervp);
		}
		if (lowervp)
			vrele(lowervp);

		goto out;
	}

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	(*vpp)->v_flag |= vflag;
	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_openl = 0;
	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;
#ifdef DIAGNOSTIC
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
#endif
	if (cnp && (lowervp != NULLVP) && (lowervp->v_type == VREG)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_hash = 0;
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
	un->un_flags |= UN_CACHED;

	if (xlowervp)
		vrele(xlowervp);

out:
	union_list_unlock(hash);

	return (error);
}
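
/*
 * Illustration (not part of the original source): a hypothetical
 * caller holding referenced vnodes for both layers (uppervp also
 * locked, as required above) hands both references to union_allocvp
 * and keeps only the union vnode.  On getnewvnode failure the layer
 * vnodes have already been released inside union_allocvp.
 */
#ifdef notdef
static int
union_map_example(mp, undvp, dvp, cnp, uppervp, lowervp, vpp)
	struct mount *mp;
	struct vnode *undvp, *dvp;
	struct componentname *cnp;
	struct vnode *uppervp, *lowervp;
	struct vnode **vpp;
{
	int error;

	error = union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp);
	if (error)
		*vpp = NULLVP;	/* layer references already dropped */
	return (error);
}
#endif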

int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		LIST_REMOVE(un, un_cache);
		un->un_flags &= ~UN_CACHED;
	}

	if (un->un_uppervp != NULLVP)
		vrele(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vrele(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vrele(un->un_dirvp);
	if (un->un_path)
		free(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;

	return (0);
}
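
/*
 * Illustration (not part of the original source): union_freevp is the
 * back end of the "vfs reclaim entry" mentioned above; a union_reclaim
 * vnode operation would typically reduce to something like
 *
 *	return (union_freevp(ap->a_vp));
 *
 * leaving the vnode with no private data.
 */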

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * It is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	struct componentname cn;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (i.e. mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for.  This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 *
	 * The pathname buffer will be FREEed by VOP_MKDIR.
	 */
	cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen);
	cn.cn_pnbuf[cnp->cn_namelen] = '\0';

	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_proc = cnp->cn_proc;
	if (um->um_op == UNMNT_ABOVE)
		cn.cn_cred = cnp->cn_cred;
	else
		cn.cn_cred = um->um_cred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_namelen = cnp->cn_namelen;
	cn.cn_hash = cnp->cn_hash;
	cn.cn_consume = cnp->cn_consume;

	VREF(dvp);
	if (error = relookup(dvp, vpp, &cn))
		return (error);
	vrele(dvp);

	if (*vpp) {
		VOP_ABORTOP(dvp, &cn);
		VOP_UNLOCK(dvp);
		vrele(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	/* LEASE_CHECK: dvp is locked */
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);

	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	return (error);
}
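
/*
 * Illustration (not part of the original source): a lookup which finds
 * only a lower-layer directory might create its upper-layer shadow
 * roughly as follows (hypothetical variables, error handling elided):
 *
 *	error = union_mkshadow(um, upperdvp, cnp, &uvp);
 *	if (error == 0)
 *		...pass the locked (uvp) on, e.g. to union_allocvp...
 */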

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	char *cp;
	struct componentname cn;

	*vpp = NULLVP;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;
	cn.cn_consume = 0;

	VREF(un->un_dirvp);
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	vrele(un->un_dirvp);

	if (vp) {
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(un->un_dirvp);
		vrele(vp);
		return (EEXIST);
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	VATTR_NULL(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
	if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap))
		return (error);

	if (error = VOP_OPEN(vp, fmode, cred, p)) {
		vput(vp);
		return (error);
	}

	vp->v_writecount++;
	*vpp = vp;
	return (0);
}
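
/*
 * Illustration (not part of the original source): union_vn_create,
 * union_copyfile and union_vn_close combine into a copy-up sequence
 * roughly like the sketch below (error handling condensed).
 */
#ifdef notdef
static int
union_copyup_sketch(un, cred, p)
	struct union_node *un;
	struct ucred *cred;
	struct proc *p;
{
	int error;
	struct vnode *uvp;

	/* create and open a shadow file in the upper layer */
	if (error = union_vn_create(&uvp, un, p))
		return (error);

	/* copy the data across; union_copyfile wants both vnodes locked */
	VOP_LOCK(un->un_lowervp);
	error = union_copyfile(p, cred, un->un_lowervp, uvp);
	VOP_UNLOCK(un->un_lowervp);

	if (error == 0) {
		union_newupper(un, uvp);	/* (un) takes the reference */
		un->un_flags |= UN_ULOCK;	/* (uvp) is still locked */
	}
	return (union_vn_close(uvp, FWRITE, cred, p));
}
#endif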

int
union_vn_close(vp, fmode, cred, p)
	struct vnode *vp;
	int fmode;
	struct ucred *cred;
	struct proc *p;
{
	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred, p));
}

void
union_removed_upper(un)
	struct union_node *un;
{
	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp);
	}

	union_newupper(un, NULLVP);
}

struct vnode *
union_lowervp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_lowervp && (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0))
			return (NULLVP);
	}

	return (un->un_lowervp);
}