xref: /freebsd/sys/fs/unionfs/union_subr.c (revision 10ff414c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1994 Jan-Simon Pendry
5  * Copyright (c) 1994
6  *	The Regents of the University of California.  All rights reserved.
7  * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
8  * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
9  *
10  * This code is derived from software contributed to Berkeley by
11  * Jan-Simon Pendry.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
38  * $FreeBSD$
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/proc.h>
51 #include <sys/vnode.h>
52 #include <sys/dirent.h>
53 #include <sys/fcntl.h>
54 #include <sys/filedesc.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/taskqueue.h>
58 #include <sys/resourcevar.h>
59 
60 #include <security/mac/mac_framework.h>
61 
62 #include <vm/uma.h>
63 
64 #include <fs/unionfs/union.h>
65 
#define NUNIONFSNODECACHE 16	/* buckets in each directory's child-node hash */

/* malloc(9) types for unionfs allocations. */
static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");

/*
 * Deferred-release machinery: unionfs_noderem() queues nodes here instead of
 * calling vrele() on the parent directory vnode directly (presumably to avoid
 * re-entering the VFS from that context -- see unionfs_noderem()); a
 * dedicated taskqueue thread drains the list in unionfs_deferred_rele().
 */
static struct task unionfs_deferred_rele_task;
static struct mtx unionfs_deferred_rele_lock;
static STAILQ_HEAD(, unionfs_node) unionfs_deferred_rele_list =
    STAILQ_HEAD_INITIALIZER(unionfs_deferred_rele_list);
static TASKQUEUE_DEFINE_THREAD(unionfs_rele);

/* Number of vnodes released through the deferred path (read-only sysctl). */
unsigned int unionfs_ndeferred = 0;
SYSCTL_UINT(_vfs, OID_AUTO, unionfs_ndeferred, CTLFLAG_RD,
    &unionfs_ndeferred, 0, "unionfs deferred vnode release");

static void unionfs_deferred_rele(void *, int);
84 /*
85  * Initialize
86  */
87 int
88 unionfs_init(struct vfsconf *vfsp)
89 {
90 	UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
91 	TASK_INIT(&unionfs_deferred_rele_task, 0, unionfs_deferred_rele, NULL);
92 	mtx_init(&unionfs_deferred_rele_lock, "uniondefr", NULL, MTX_DEF);
93 	return (0);
94 }
95 
/*
 * Module teardown.  The taskqueue must be quiesced before it is freed so
 * that any pending deferred releases complete; the mutex is destroyed last
 * because the task handler takes it.
 */
int
unionfs_uninit(struct vfsconf *vfsp)
{
	taskqueue_quiesce(taskqueue_unionfs_rele);
	taskqueue_free(taskqueue_unionfs_rele);
	mtx_destroy(&unionfs_deferred_rele_lock);
	return (0);
}
107 
108 static void
109 unionfs_deferred_rele(void *arg __unused, int pending __unused)
110 {
111 	STAILQ_HEAD(, unionfs_node) local_rele_list;
112 	struct unionfs_node *unp, *tunp;
113 	unsigned int ndeferred;
114 
115 	ndeferred = 0;
116 	STAILQ_INIT(&local_rele_list);
117 	mtx_lock(&unionfs_deferred_rele_lock);
118 	STAILQ_CONCAT(&local_rele_list, &unionfs_deferred_rele_list);
119 	mtx_unlock(&unionfs_deferred_rele_lock);
120 	STAILQ_FOREACH_SAFE(unp, &local_rele_list, un_rele, tunp) {
121 		++ndeferred;
122 		MPASS(unp->un_dvp != NULL);
123 		vrele(unp->un_dvp);
124 		free(unp, M_UNIONFSNODE);
125 	}
126 
127 	/* We expect this function to be single-threaded, thus no atomic */
128 	unionfs_ndeferred += ndeferred;
129 }
130 
131 static struct unionfs_node_hashhead *
132 unionfs_get_hashhead(struct vnode *dvp, char *path)
133 {
134 	struct unionfs_node *unp;
135 	int		count;
136 	char		hash;
137 
138 	hash = 0;
139 	unp = VTOUNIONFS(dvp);
140 	if (path != NULL) {
141 		for (count = 0; path[count]; count++)
142 			hash += path[count];
143 	}
144 
145 	return (&(unp->un_hashtbl[hash & (unp->un_hashmask)]));
146 }
147 
/*
 * Get the cached vnode.
 *
 * Look up a cached unionfs directory vnode named 'path' in the cache of
 * parent directory 'dvp'.  Returns the vnode (unreferenced; the caller is
 * expected to vref() it) on a hit, or NULLVP on a miss or if the cached
 * vnode is being destroyed.  Only directories are cached, hence the VDIR
 * assertions.
 */
static struct vnode *
unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode *vp;

	KASSERT((uvp == NULLVP || uvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));
	KASSERT((lvp == NULLVP || lvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));

	/* The parent's interlock protects its hash table of children. */
	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			/* MTX_DUPOK: child interlock nests inside parent's. */
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			VI_UNLOCK(dvp);
			vp->v_iflag &= ~VI_OWEINACT;
			/* Treat doomed or inactivating vnodes as a miss. */
			if (VN_IS_DOOMED(vp) ||
			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			return (vp);
		}
	}
	VI_UNLOCK(dvp);

	return (NULLVP);
}
185 
/*
 * Add the new vnode into cache.
 *
 * Insert 'uncp' into the child cache of parent 'dvp' under name 'path'.
 * If a usable entry with the same name is already present, return that
 * vnode (unreferenced) instead and leave 'uncp' out of the cache;
 * otherwise insert 'uncp' and return NULLVP.
 */
static struct vnode *
unionfs_ins_cached_vnode(struct unionfs_node *uncp,
    struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode *vp;

	KASSERT((uncp->un_uppervp==NULLVP || uncp->un_uppervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));
	KASSERT((uncp->un_lowervp==NULLVP || uncp->un_lowervp->v_type==VDIR),
	    ("unionfs_ins_cached_vnode: v_type != VDIR"));

	/* The parent's interlock protects its hash table of children. */
	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			/* MTX_DUPOK: child interlock nests inside parent's. */
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			vp->v_iflag &= ~VI_OWEINACT;
			if (VN_IS_DOOMED(vp) ||
			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
				/*
				 * Existing entry is going away; install the
				 * new node and report a miss.
				 */
				LIST_INSERT_HEAD(hd, uncp, un_hash);
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			VI_UNLOCK(dvp);
			return (vp);
		}
	}

	LIST_INSERT_HEAD(hd, uncp, un_hash);
	VI_UNLOCK(dvp);

	return (NULLVP);
}
226 
/*
 * Remove the vnode.
 *
 * Unlink 'unp' from the child cache of its parent 'dvp'.  The caller must
 * have established that the node is actually linked (le_prev != NULL).
 * The link pointers are cleared so later checks can tell the node is no
 * longer cached.
 */
static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
	KASSERT((unp != NULL), ("unionfs_rem_cached_vnode: null node"));
	KASSERT((dvp != NULLVP),
	    ("unionfs_rem_cached_vnode: null parent vnode"));
	KASSERT((unp->un_hash.le_prev != NULL),
	    ("unionfs_rem_cached_vnode: null hash"));

	/* The parent's interlock protects its hash table of children. */
	VI_LOCK(dvp);
	LIST_REMOVE(unp, un_hash);
	unp->un_hash.le_next = NULL;
	unp->un_hash.le_prev = NULL;
	VI_UNLOCK(dvp);
}
245 
/*
 * Common cleanup handling for unionfs_nodeget
 * Upper, lower, and parent directory vnodes are expected to be referenced by
 * the caller.  Upper and lower vnodes, if non-NULL, are also expected to be
 * exclusively locked by the caller.
 * This function will return with the caller's locks and references undone.
 */
static void
unionfs_nodeget_cleanup(struct vnode *vp, void *arg)
{
	struct unionfs_node *unp;

	/*
	 * Lock and reset the default vnode lock; vgone() expects a locked
	 * vnode, and we're going to reset the vnode ops.
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);

	/*
	 * Clear out private data and reset the vnode ops to avoid use of
	 * unionfs vnode ops on a partially constructed vnode.
	 */
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	vp->v_op = &dead_vnodeops;
	VI_UNLOCK(vp);
	vgone(vp);
	vput(vp);

	/* Undo the caller's references: vrele() the (unlocked) parent,
	 * vput() the locked upper/lower vnodes, then free node storage. */
	unp = arg;
	if (unp->un_dvp != NULLVP)
		vrele(unp->un_dvp);
	if (unp->un_uppervp != NULLVP)
		vput(unp->un_uppervp);
	if (unp->un_lowervp != NULLVP)
		vput(unp->un_lowervp);
	if (unp->un_hashtbl != NULL)
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	free(unp->un_path, M_UNIONFSPATH);
	free(unp, M_UNIONFSNODE);
}
288 
/*
 * Make a new or get existing unionfs node.
 *
 * uppervp and lowervp should be unlocked. Because if new unionfs vnode is
 * locked, uppervp or lowervp is locked too. In order to prevent dead lock,
 * you should not lock plurality simultaneously.
 *
 * On success, *vpp holds a referenced unionfs vnode (locked if cnp carried
 * lock flags).  At least one of uppervp/lowervp must be non-NULL.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
    struct vnode *lowervp, struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct thread *td)
{
	char	       *path;
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode   *vp;
	int		error;
	int		lkflags;
	enum vtype	vt;

	error = 0;
	ump = MOUNTTOUNIONFSMOUNT(mp);
	lkflags = (cnp ? cnp->cn_lkflags : 0);
	path = (cnp ? cnp->cn_nameptr : NULL);
	*vpp = NULLVP;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("unionfs_nodeget: upper and lower is null");

	/* Type of the union node follows the preferred (upper) layer. */
	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

	/* If it has no ISLASTCN flag, path check is skipped. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;

	/* check the cache */
	if (path != NULL && dvp != NULLVP && vt == VDIR) {
		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp, path);
		if (vp != NULLVP) {
			vref(vp);
			*vpp = vp;
			goto unionfs_nodeget_out;
		}
	}

	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
		/* dvp will be NULLVP only in case of root vnode. */
		if (dvp == NULLVP)
			return (EINVAL);
	}
	unp = malloc(sizeof(struct unionfs_node),
	    M_UNIONFSNODE, M_WAITOK | M_ZERO);

	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	/* Take references that unionfs_nodeget_cleanup() expects to undo. */
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	/* Only directory nodes carry a cache of their children. */
	if (vt == VDIR)
		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
		    &(unp->un_hashmask));

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;
	/* The unionfs vnode shares the lock of its preferred layer vnode. */
	if (uppervp != NULLVP)
		vp->v_vnlock = uppervp->v_vnlock;
	else
		vp->v_vnlock = lowervp->v_vnlock;

	if (path != NULL) {
		/* Keep a NUL-terminated private copy of the last component. */
		unp->un_path = malloc(cnp->cn_namelen + 1,
		    M_UNIONFSPATH, M_WAITOK | M_ZERO);
		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
		unp->un_pathlen = cnp->cn_namelen;
	}
	vp->v_type = vt;
	vp->v_data = unp;

	/* The node backed by both mount roots is the unionfs root. */
	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
		vp->v_vflag |= VV_ROOT;

	/* Deadlock-safe acquisition of both layer locks. */
	vn_lock_pair(lowervp, false, uppervp, false);
	/* On failure insmntque1() runs unionfs_nodeget_cleanup() itself. */
	error = insmntque1(vp, mp, unionfs_nodeget_cleanup, unp);
	if (error != 0)
		return (error);
	/* Drop layer vnodes that were doomed while we were constructing. */
	if (lowervp != NULL && VN_IS_DOOMED(lowervp)) {
		vput(lowervp);
		unp->un_lowervp = NULL;
	}
	if (uppervp != NULL && VN_IS_DOOMED(uppervp)) {
		vput(uppervp);
		unp->un_uppervp = NULL;
	}
	if (unp->un_lowervp == NULL && unp->un_uppervp == NULL) {
		unionfs_nodeget_cleanup(vp, unp);
		return (ENOENT);
	}
	/*
	 * Insert into the parent's cache; a non-NULL result means another
	 * thread won the race, so discard our node and use the winner's.
	 */
	if (path != NULL && dvp != NULLVP && vt == VDIR)
		*vpp = unionfs_ins_cached_vnode(unp, dvp, path);
	if (*vpp != NULLVP) {
		unionfs_nodeget_cleanup(vp, unp);
		vp = *vpp;
		vref(vp);
	} else {
		if (uppervp != NULL)
			VOP_UNLOCK(uppervp);
		if (lowervp != NULL)
			VOP_UNLOCK(lowervp);
		*vpp = vp;
	}

unionfs_nodeget_out:
	if (lkflags & LK_TYPE_MASK)
		vn_lock(vp, lkflags | LK_RETRY);

	return (0);
}
418 
/*
 * Clean up the unionfs node.
 *
 * Detaches vp from its upper/lower layer vnodes and the parent's cache,
 * reverts vp to its private v_lock, and frees the node data (deferring the
 * parent vrele() to the taskqueue thread when a parent exists).
 * NOTE(review): the layer vnodes appear to be locked on entry (they are
 * unlocked below) -- confirm against the callers.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;
	int		count;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	if (vp->v_writecount > 0) {
		/* Return the write count we charged to the layer vnode. */
		if (uvp != NULL)
			VOP_ADD_WRITECOUNT(uvp, -vp->v_writecount);
		else if (lvp != NULL)
			VOP_ADD_WRITECOUNT(lvp, -vp->v_writecount);
	} else if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);

	if (lvp != NULLVP)
		VOP_UNLOCK(lvp);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp);

	/* le_prev != NULL means the node is still linked in dvp's cache. */
	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	/* Re-acquire vp under its now-private lock. */
	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
		panic("the lock for deletion is unacquirable.");

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
		unp->un_pathlen = 0;
	}

	if (unp->un_hashtbl != NULL) {
		/* Unlink any remaining children before destroying the table. */
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}

	/* Free all per-process status records. */
	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
		LIST_REMOVE(unsp, uns_list);
		free(unsp, M_TEMP);
	}
	if (dvp != NULLVP) {
		/*
		 * Defer the parent vrele() (and freeing of unp) to the
		 * taskqueue thread; see unionfs_deferred_rele().
		 */
		mtx_lock(&unionfs_deferred_rele_lock);
		STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
		mtx_unlock(&unionfs_deferred_rele_lock);
		taskqueue_enqueue(taskqueue_unionfs_rele,
		    &unionfs_deferred_rele_task);
	} else
		free(unp, M_UNIONFSNODE);
}
501 
502 /*
503  * Get the unionfs node status.
504  * You need exclusive lock this vnode.
505  */
506 void
507 unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
508     struct unionfs_node_status **unspp)
509 {
510 	struct unionfs_node_status *unsp;
511 	pid_t pid;
512 
513 	pid = td->td_proc->p_pid;
514 
515 	KASSERT(NULL != unspp, ("null pointer"));
516 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
517 
518 	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
519 		if (unsp->uns_pid == pid) {
520 			*unspp = unsp;
521 			return;
522 		}
523 	}
524 
525 	/* create a new unionfs node status */
526 	unsp = malloc(sizeof(struct unionfs_node_status),
527 	    M_TEMP, M_WAITOK | M_ZERO);
528 
529 	unsp->uns_pid = pid;
530 	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);
531 
532 	*unspp = unsp;
533 }
534 
535 /*
536  * Remove the unionfs node status, if you can.
537  * You need exclusive lock this vnode.
538  */
539 void
540 unionfs_tryrem_node_status(struct unionfs_node *unp,
541     struct unionfs_node_status *unsp)
542 {
543 	KASSERT(NULL != unsp, ("null pointer"));
544 	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");
545 
546 	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
547 		return;
548 
549 	LIST_REMOVE(unsp, uns_list);
550 	free(unsp, M_TEMP);
551 }
552 
553 /*
554  * Create upper node attr.
555  */
556 void
557 unionfs_create_uppervattr_core(struct unionfs_mount *ump, struct vattr *lva,
558     struct vattr *uva, struct thread *td)
559 {
560 	VATTR_NULL(uva);
561 	uva->va_type = lva->va_type;
562 	uva->va_atime = lva->va_atime;
563 	uva->va_mtime = lva->va_mtime;
564 	uva->va_ctime = lva->va_ctime;
565 
566 	switch (ump->um_copymode) {
567 	case UNIONFS_TRANSPARENT:
568 		uva->va_mode = lva->va_mode;
569 		uva->va_uid = lva->va_uid;
570 		uva->va_gid = lva->va_gid;
571 		break;
572 	case UNIONFS_MASQUERADE:
573 		if (ump->um_uid == lva->va_uid) {
574 			uva->va_mode = lva->va_mode & 077077;
575 			uva->va_mode |= (lva->va_type == VDIR ?
576 			    ump->um_udir : ump->um_ufile) & 0700;
577 			uva->va_uid = lva->va_uid;
578 			uva->va_gid = lva->va_gid;
579 		} else {
580 			uva->va_mode = (lva->va_type == VDIR ?
581 			    ump->um_udir : ump->um_ufile);
582 			uva->va_uid = ump->um_uid;
583 			uva->va_gid = ump->um_gid;
584 		}
585 		break;
586 	default:		/* UNIONFS_TRADITIONAL */
587 		uva->va_mode = 0777 & ~td->td_proc->p_pd->pd_cmask;
588 		uva->va_uid = ump->um_uid;
589 		uva->va_gid = ump->um_gid;
590 		break;
591 	}
592 }
593 
594 /*
595  * Create upper node attr.
596  */
597 int
598 unionfs_create_uppervattr(struct unionfs_mount *ump, struct vnode *lvp,
599     struct vattr *uva, struct ucred *cred, struct thread *td)
600 {
601 	struct vattr	lva;
602 	int		error;
603 
604 	if ((error = VOP_GETATTR(lvp, &lva, cred)))
605 		return (error);
606 
607 	unionfs_create_uppervattr_core(ump, &lva, uva, td);
608 
609 	return (error);
610 }
611 
612 /*
613  * relookup
614  *
615  * dvp should be locked on entry and will be locked on return.
616  *
617  * If an error is returned, *vpp will be invalid, otherwise it will hold a
618  * locked, referenced vnode. If *vpp == dvp then remember that only one
619  * LK_EXCLUSIVE lock is held.
620  */
621 int
622 unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
623     struct componentname *cnp, struct componentname *cn, struct thread *td,
624     char *path, int pathlen, u_long nameiop)
625 {
626 	int error;
627 
628 	cn->cn_namelen = pathlen;
629 	cn->cn_pnbuf = path;
630 	cn->cn_nameiop = nameiop;
631 	cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
632 	cn->cn_lkflags = LK_EXCLUSIVE;
633 	cn->cn_thread = td;
634 	cn->cn_cred = cnp->cn_cred;
635 	cn->cn_nameptr = cn->cn_pnbuf;
636 
637 	if (nameiop == DELETE)
638 		cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
639 	else if (RENAME == nameiop)
640 		cn->cn_flags |= (cnp->cn_flags & SAVESTART);
641 	else if (nameiop == CREATE)
642 		cn->cn_flags |= NOCACHE;
643 
644 	vref(dvp);
645 	VOP_UNLOCK(dvp);
646 
647 	if ((error = relookup(dvp, vpp, cn))) {
648 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
649 	} else
650 		vrele(dvp);
651 
652 	KASSERT((cn->cn_flags & HASBUF) != 0,
653 	    ("%s: HASBUF cleared", __func__));
654 	KASSERT((cn->cn_flags & SAVENAME) != 0,
655 	    ("%s: SAVENAME cleared", __func__));
656 	KASSERT(cn->cn_pnbuf == path, ("%s: cn_pnbuf changed", __func__));
657 
658 	return (error);
659 }
660 
661 /*
662  * relookup for CREATE namei operation.
663  *
664  * dvp is unionfs vnode. dvp should be locked.
665  *
666  * If it called 'unionfs_copyfile' function by unionfs_link etc,
667  * VOP_LOOKUP information is broken.
668  * So it need relookup in order to create link etc.
669  */
670 int
671 unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
672     struct thread *td)
673 {
674 	struct vnode *udvp;
675 	struct vnode *vp;
676 	struct componentname cn;
677 	int error;
678 
679 	udvp = UNIONFSVPTOUPPERVP(dvp);
680 	vp = NULLVP;
681 
682 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
683 	    cnp->cn_namelen, CREATE);
684 	if (error)
685 		return (error);
686 
687 	if (vp != NULLVP) {
688 		if (udvp == vp)
689 			vrele(vp);
690 		else
691 			vput(vp);
692 
693 		error = EEXIST;
694 	}
695 
696 	return (error);
697 }
698 
699 /*
700  * relookup for DELETE namei operation.
701  *
702  * dvp is unionfs vnode. dvp should be locked.
703  */
704 int
705 unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
706     struct thread *td)
707 {
708 	struct vnode *udvp;
709 	struct vnode *vp;
710 	struct componentname cn;
711 	int error;
712 
713 	udvp = UNIONFSVPTOUPPERVP(dvp);
714 	vp = NULLVP;
715 
716 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
717 	    cnp->cn_namelen, DELETE);
718 	if (error)
719 		return (error);
720 
721 	if (vp == NULLVP)
722 		error = ENOENT;
723 	else {
724 		if (udvp == vp)
725 			vrele(vp);
726 		else
727 			vput(vp);
728 	}
729 
730 	return (error);
731 }
732 
733 /*
734  * relookup for RENAME namei operation.
735  *
736  * dvp is unionfs vnode. dvp should be locked.
737  */
738 int
739 unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
740     struct thread *td)
741 {
742 	struct vnode *udvp;
743 	struct vnode *vp;
744 	struct componentname cn;
745 	int error;
746 
747 	udvp = UNIONFSVPTOUPPERVP(dvp);
748 	vp = NULLVP;
749 
750 	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
751 	    cnp->cn_namelen, RENAME);
752 	if (error)
753 		return (error);
754 
755 	if (vp != NULLVP) {
756 		if (udvp == vp)
757 			vrele(vp);
758 		else
759 			vput(vp);
760 	}
761 
762 	return (error);
763 }
764 
765 /*
766  * Update the unionfs_node.
767  *
768  * uvp is new locked upper vnode. unionfs vnode's lock will be exchanged to the
769  * uvp's lock and lower's lock will be unlocked.
770  */
771 static void
772 unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
773     struct thread *td)
774 {
775 	struct vnode   *vp;
776 	struct vnode   *lvp;
777 	struct vnode   *dvp;
778 	unsigned	count, lockrec;
779 
780 	vp = UNIONFSTOV(unp);
781 	lvp = unp->un_lowervp;
782 	ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
783 	dvp = unp->un_dvp;
784 
785 	/*
786 	 * lock update
787 	 */
788 	VI_LOCK(vp);
789 	unp->un_uppervp = uvp;
790 	vp->v_vnlock = uvp->v_vnlock;
791 	VI_UNLOCK(vp);
792 	lockrec = lvp->v_vnlock->lk_recurse;
793 	for (count = 0; count < lockrec; count++)
794 		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
795 
796 	/*
797 	 * cache update
798 	 */
799 	if (unp->un_path != NULL && dvp != NULLVP && vp->v_type == VDIR) {
800 		static struct unionfs_node_hashhead *hd;
801 
802 		VI_LOCK(dvp);
803 		hd = unionfs_get_hashhead(dvp, unp->un_path);
804 		LIST_REMOVE(unp, un_hash);
805 		LIST_INSERT_HEAD(hd, unp, un_hash);
806 		VI_UNLOCK(dvp);
807 	}
808 }
809 
/*
 * Create a new shadow dir.
 *
 * udvp should be locked on entry and will be locked on return.
 *
 * If no error returned, unp will be updated.
 *
 * The mkdir is performed with root credentials (a temporary crdup'd copy);
 * the caller's credential in cnp is restored before returning.
 */
int
unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
    struct unionfs_node *unp, struct componentname *cnp, struct thread *td)
{
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	va;
	struct vattr	lva;
	struct nameidata nd;
	struct mount   *mp;
	struct ucred   *cred;
	struct ucred   *credbk;
	struct uidinfo *rootinfo;
	int		error;

	if (unp->un_uppervp != NULLVP)
		return (EEXIST);

	lvp = unp->un_lowervp;
	uvp = NULLVP;
	credbk = cnp->cn_cred;

	/* Authority change to root */
	rootinfo = uifind((uid_t)0);
	cred = crdup(cnp->cn_cred);
	/*
	 * The calls to chgproccnt() are needed to compensate for change_ruid()
	 * calling chgproccnt().
	 */
	chgproccnt(cred->cr_ruidinfo, 1, 0);
	change_euid(cred, rootinfo);
	change_ruid(cred, rootinfo);
	change_svuid(cred, (uid_t)0);
	uifree(rootinfo);
	cnp->cn_cred = cred;

	memset(&nd.ni_cnd, 0, sizeof(struct componentname));
	NDPREINIT(&nd);

	/* Lower attributes seed the shadow directory's attributes. */
	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
		goto unionfs_mkshadowdir_abort;

	if ((error = unionfs_relookup(udvp, &uvp, cnp, &nd.ni_cnd, td,
	    cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
		goto unionfs_mkshadowdir_abort;
	if (uvp != NULLVP) {
		/* Somebody already created it on the upper layer. */
		if (udvp == uvp)
			vrele(uvp);
		else
			vput(uvp);

		error = EEXIST;
		goto unionfs_mkshadowdir_abort;
	}

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)))
		goto unionfs_mkshadowdir_abort;
	unionfs_create_uppervattr_core(ump, &lva, &va, td);

	error = VOP_MKDIR(udvp, &uvp, &nd.ni_cnd, &va);

	if (!error) {
		/* Attach the new upper directory to the unionfs node. */
		unionfs_node_update(unp, uvp, td);

		/*
		 * XXX The bug which cannot set uid/gid was corrected.
		 * Ignore errors.
		 */
		va.va_type = VNON;
		VOP_SETATTR(uvp, &va, nd.ni_cnd.cn_cred);
	}
	vn_finished_write(mp);

unionfs_mkshadowdir_abort:
	/* Restore the caller's credential and drop the temporary one. */
	cnp->cn_cred = credbk;
	chgproccnt(cred->cr_ruidinfo, -1, 0);
	crfree(cred);

	return (error);
}
897 
898 /*
899  * Create a new whiteout.
900  *
901  * dvp should be locked on entry and will be locked on return.
902  */
903 int
904 unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
905     struct thread *td, char *path, int pathlen)
906 {
907 	struct vnode   *wvp;
908 	struct nameidata nd;
909 	struct mount   *mp;
910 	int		error;
911 
912 	wvp = NULLVP;
913 	NDPREINIT(&nd);
914 	if ((error = unionfs_relookup(dvp, &wvp, cnp, &nd.ni_cnd, td, path,
915 	    pathlen, CREATE))) {
916 		return (error);
917 	}
918 	if (wvp != NULLVP) {
919 		if (dvp == wvp)
920 			vrele(wvp);
921 		else
922 			vput(wvp);
923 
924 		return (EEXIST);
925 	}
926 
927 	if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)))
928 		goto unionfs_mkwhiteout_free_out;
929 	error = VOP_WHITEOUT(dvp, &nd.ni_cnd, CREATE);
930 
931 	vn_finished_write(mp);
932 
933 unionfs_mkwhiteout_free_out:
934 	return (error);
935 }
936 
/*
 * Create a new vnode for create a new shadow file.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced and opened vnode.
 *
 * unp is never updated.
 *
 * The new file's attributes are derived from the lower vnode's attributes;
 * it is opened write-only and its writecount is bumped, mirroring what
 * unionfs_copyfile() later undoes.
 */
static int
unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
    struct unionfs_node *unp, struct vattr *uvap, struct thread *td)
{
	struct unionfs_mount *ump;
	struct vnode   *vp;
	struct vnode   *lvp;
	struct ucred   *cred;
	struct vattr	lva;
	struct nameidata nd;
	int		fmode;
	int		error;

	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
	vp = NULLVP;
	lvp = unp->un_lowervp;
	cred = td->td_ucred;
	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
	error = 0;

	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
		return (error);
	unionfs_create_uppervattr_core(ump, &lva, uvap, td);

	if (unp->un_path == NULL)
		panic("unionfs: un_path is null");

	/* Hand-build the componentname for relookup from the cached name. */
	nd.ni_cnd.cn_namelen = unp->un_pathlen;
	nd.ni_cnd.cn_pnbuf = unp->un_path;
	nd.ni_cnd.cn_nameiop = CREATE;
	nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME |
	    ISLASTCN;
	nd.ni_cnd.cn_lkflags = LK_EXCLUSIVE;
	nd.ni_cnd.cn_thread = td;
	nd.ni_cnd.cn_cred = cred;
	nd.ni_cnd.cn_nameptr = nd.ni_cnd.cn_pnbuf;
	NDPREINIT(&nd);

	vref(udvp);
	if ((error = relookup(udvp, &vp, &nd.ni_cnd)) != 0)
		goto unionfs_vn_create_on_upper_free_out2;
	vrele(udvp);

	if (vp != NULLVP) {
		/* The file already exists on the upper layer. */
		if (vp == udvp)
			vrele(vp);
		else
			vput(vp);
		error = EEXIST;
		goto unionfs_vn_create_on_upper_free_out1;
	}

	if ((error = VOP_CREATE(udvp, &vp, &nd.ni_cnd, uvap)) != 0)
		goto unionfs_vn_create_on_upper_free_out1;

	if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
		vput(vp);
		goto unionfs_vn_create_on_upper_free_out1;
	}
	error = VOP_ADD_WRITECOUNT(vp, 1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
	    __func__, vp, vp->v_writecount);
	if (error == 0) {
		*vpp = vp;
	} else {
		VOP_CLOSE(vp, fmode, cred, td);
	}

unionfs_vn_create_on_upper_free_out1:
	VOP_UNLOCK(udvp);

unionfs_vn_create_on_upper_free_out2:
	/* relookup() must not have consumed or replaced the name buffer. */
	KASSERT((nd.ni_cnd.cn_flags & HASBUF) != 0,
	    ("%s: HASBUF cleared", __func__));
	KASSERT((nd.ni_cnd.cn_flags & SAVENAME) != 0,
	    ("%s: SAVENAME cleared", __func__));
	KASSERT(nd.ni_cnd.cn_pnbuf == unp->un_path,
	    ("%s: cn_pnbuf changed", __func__));

	return (error);
}
1026 
/*
 * Copy from lvp to uvp.
 *
 * lvp and uvp should be locked and opened on entry and will be locked and
 * opened on return.
 *
 * Copies in MAXBSIZE chunks through a temporary kernel buffer, retrying
 * short writes until each chunk read has been written out in full.
 */
static int
unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
    struct ucred *cred, struct thread *td)
{
	char           *buf;
	struct uio	uio;
	struct iovec	iov;
	off_t		offset;
	int		count;
	int		error;
	int		bufoffset;

	error = 0;
	memset(&uio, 0, sizeof(uio));

	uio.uio_td = td;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	while (error == 0) {
		/* Remember the file offset this chunk was read from. */
		offset = uio.uio_offset;

		/* Re-arm the uio for the next read; VOP_READ consumes it. */
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;

		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
			break;
		/* Zero bytes read means EOF. */
		if ((count = MAXBSIZE - uio.uio_resid) == 0)
			break;

		/* Write the chunk out, looping over any short writes. */
		bufoffset = 0;
		while (bufoffset < count) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf + bufoffset;
			iov.iov_len = count - bufoffset;
			uio.uio_offset = offset + bufoffset;
			uio.uio_resid = iov.iov_len;
			uio.uio_rw = UIO_WRITE;

			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
				break;

			bufoffset += (count - bufoffset) - uio.uio_resid;
		}

		/* Position the next read just past what was written. */
		uio.uio_offset = offset + bufoffset;
	}

	free(buf, M_TEMP);

	return (error);
}
1092 
/*
 * Copy file from lower to upper.
 *
 * If you need copy of the contents, set 1 to docopy. Otherwise, set 0 to
 * docopy.
 *
 * If no error returned, unp will be updated.
 *
 * Fails with EROFS if any involved mount/layer is read-only, EINVAL when
 * the node has no parent, and EEXIST when an upper vnode already exists.
 */
int
unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
    struct thread *td)
{
	struct mount   *mp;
	struct vnode   *udvp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	uva;
	int		error;

	lvp = unp->un_lowervp;
	uvp = NULLVP;

	/* Preconditions: writable union, known parent, no upper vnode yet. */
	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (unp->un_dvp == NULLVP)
		return (EINVAL);
	if (unp->un_uppervp != NULLVP)
		return (EEXIST);
	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
	if (udvp == NULLVP)
		return (EROFS);
	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	error = VOP_ACCESS(lvp, VREAD, cred, td);
	if (error != 0)
		return (error);

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	/* Creates, opens, and write-counts the upper file. */
	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
	if (error != 0) {
		vn_finished_write(mp);
		return (error);
	}

	if (docopy != 0) {
		/* Copy the lower file's contents into the new upper file. */
		error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
		if (error == 0) {
			error = unionfs_copyfile_core(lvp, uvp, cred, td);
			VOP_CLOSE(lvp, FREAD, cred, td);
		}
	}
	/* Undo the open and writecount taken by unionfs_vn_create_on_upper. */
	VOP_CLOSE(uvp, FWRITE, cred, td);
	VOP_ADD_WRITECOUNT_CHECKED(uvp, -1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
	    __func__, uvp, uvp->v_writecount);

	vn_finished_write(mp);

	if (error == 0) {
		/* Reset the attributes. Ignore errors. */
		uva.va_type = VNON;
		VOP_SETATTR(uvp, &uva, cred);
	}

	/* Attach the new upper vnode to the unionfs node. */
	unionfs_node_update(unp, uvp, td);

	return (error);
}
1163 
1164 /*
1165  * It checks whether vp can rmdir. (check empty)
1166  *
1167  * vp is unionfs vnode.
1168  * vp should be locked.
1169  */
int
unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct vnode   *uvp;	/* upper-layer directory vnode */
	struct vnode   *lvp;	/* lower-layer directory vnode */
	struct vnode   *tvp;	/* scratch vnode from VOP_LOOKUP */
	struct dirent  *dp;
	struct dirent  *edp;	/* one past the last valid dirent in buf */
	struct componentname cn;
	struct iovec	iov;
	struct uio	uio;
	struct vattr	va;
	int		error;
	int		eofflag;
	int		lookuperr;

	/*
	 * The size of buf needs to be larger than DIRBLKSIZ.
	 */
	char		buf[256 * 6];

	ASSERT_VOP_ELOCKED(vp, "unionfs_check_rmdir");

	eofflag = 0;
	uvp = UNIONFSVPTOUPPERVP(vp);
	lvp = UNIONFSVPTOLOWERVP(vp);

	/* check opaque */
	/* An opaque upper directory hides every lower entry. */
	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
		return (error);
	if (va.va_flags & OPAQUE)
		return (0);

	/* open vnode */
#ifdef MAC
	if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
		return (error);
#endif
	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
		return (error);
	if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
		return (error);

	uio.uio_rw = UIO_READ;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = td;
	uio.uio_offset = 0;

	/* Without MAC, error is still 0 from the successful VOP_OPEN. */
#ifdef MAC
	error = mac_vnode_check_readdir(td->td_ucred, lvp);
#endif
	/* Scan the lower directory until EOF or an entry proves it non-empty. */
	while (!error && !eofflag) {
		iov.iov_base = buf;
		iov.iov_len = sizeof(buf);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = iov.iov_len;

		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
		if (error != 0)
			break;
		/* No data and no EOF means the lower FS misbehaved. */
		if (eofflag == 0 && uio.uio_resid == sizeof(buf)) {
#ifdef DIAGNOSTIC
			panic("bad readdir response from lower FS.");
#endif
			break;
		}

		edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
		for (dp = (struct dirent*)buf; !error && dp < edp;
		     dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
			/* Skip whiteouts, empty slots, "." and "..". */
			if (dp->d_type == DT_WHT || dp->d_fileno == 0 ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
				continue;

			cn.cn_namelen = dp->d_namlen;
			cn.cn_pnbuf = NULL;
			cn.cn_nameptr = dp->d_name;
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			cn.cn_lkflags = LK_EXCLUSIVE;
			cn.cn_thread = td;
			cn.cn_cred = cred;

			/*
			 * check entry in lower.
			 * Sometimes, readdir function returns
			 * wrong entry.
			 */
			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);
			else
				continue; /* skip entry */

			/*
			 * check entry
			 * If it has no exist/whiteout entry in upper,
			 * directory is not empty.
			 */
			/* cn_flags must be reset: VOP_LOOKUP may modify them. */
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);

			/* ignore exist or whiteout entry */
			if (!lookuperr ||
			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
				continue;

			/* A lower entry visible through the union was found. */
			error = ENOTEMPTY;
		}
	}

	/* close vnode */
	VOP_CLOSE(vp, FREAD, cred, td);

	return (error);
}
1294 
1295 #ifdef DIAGNOSTIC
1296 
1297 struct vnode   *
1298 unionfs_checkuppervp(struct vnode *vp, char *fil, int lno)
1299 {
1300 	struct unionfs_node *unp;
1301 
1302 	unp = VTOUNIONFS(vp);
1303 
1304 #ifdef notyet
1305 	if (vp->v_op != unionfs_vnodeop_p) {
1306 		printf("unionfs_checkuppervp: on non-unionfs-node.\n");
1307 #ifdef KDB
1308 		kdb_enter(KDB_WHY_UNIONFS,
1309 		    "unionfs_checkuppervp: on non-unionfs-node.\n");
1310 #endif
1311 		panic("unionfs_checkuppervp");
1312 	}
1313 #endif
1314 	return (unp->un_uppervp);
1315 }
1316 
1317 struct vnode   *
1318 unionfs_checklowervp(struct vnode *vp, char *fil, int lno)
1319 {
1320 	struct unionfs_node *unp;
1321 
1322 	unp = VTOUNIONFS(vp);
1323 
1324 #ifdef notyet
1325 	if (vp->v_op != unionfs_vnodeop_p) {
1326 		printf("unionfs_checklowervp: on non-unionfs-node.\n");
1327 #ifdef KDB
1328 		kdb_enter(KDB_WHY_UNIONFS,
1329 		    "unionfs_checklowervp: on non-unionfs-node.\n");
1330 #endif
1331 		panic("unionfs_checklowervp");
1332 	}
1333 #endif
1334 	return (unp->un_lowervp);
1335 }
1336 #endif
1337