/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/resourcevar.h>

#include <machine/atomic.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#include <fs/unionfs/union.h>

#define NUNIONFSNODECACHE 16
#define UNIONFSHASHMASK (NUNIONFSNODECACHE - 1)

static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");

static struct task unionfs_deferred_rele_task;
static struct mtx unionfs_deferred_rele_lock;
static STAILQ_HEAD(, unionfs_node) unionfs_deferred_rele_list =
    STAILQ_HEAD_INITIALIZER(unionfs_deferred_rele_list);
static TASKQUEUE_DEFINE_THREAD(unionfs_rele);

unsigned int unionfs_ndeferred = 0;
SYSCTL_UINT(_vfs, OID_AUTO, unionfs_ndeferred, CTLFLAG_RD,
    &unionfs_ndeferred, 0, "unionfs deferred vnode release");

static void unionfs_deferred_rele(void *, int);

/*
 * Initialize
 */
int
unionfs_init(struct vfsconf *vfsp)
{
	UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
	TASK_INIT(&unionfs_deferred_rele_task, 0, unionfs_deferred_rele, NULL);
	mtx_init(&unionfs_deferred_rele_lock, "uniondefr", NULL, MTX_DEF);
	return (0);
}

/*
 * Uninitialize
 */
int
unionfs_uninit(struct vfsconf *vfsp)
{
	taskqueue_quiesce(taskqueue_unionfs_rele);
	taskqueue_free(taskqueue_unionfs_rele);
	mtx_destroy(&unionfs_deferred_rele_lock);
	return (0);
}

static void
unionfs_deferred_rele(void *arg __unused, int pending __unused)
{
	STAILQ_HEAD(, unionfs_node) local_rele_list;
	struct unionfs_node *unp, *tunp;
	unsigned int ndeferred;

	ndeferred = 0;
	STAILQ_INIT(&local_rele_list);
	mtx_lock(&unionfs_deferred_rele_lock);
	STAILQ_CONCAT(&local_rele_list, &unionfs_deferred_rele_list);
	mtx_unlock(&unionfs_deferred_rele_lock);
	STAILQ_FOREACH_SAFE(unp, &local_rele_list, un_rele, tunp) {
		++ndeferred;
		MPASS(unp->un_dvp != NULL);
		vrele(unp->un_dvp);
		free(unp, M_UNIONFSNODE);
	}

	/* We expect this function to be single-threaded, thus no atomic */
	unionfs_ndeferred += ndeferred;
}

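/*
 * Producer side of the deferred-release scheme, as a sketch (this is
 * what unionfs_noderem() below actually does; all names come from this
 * file).  Deferring the parent directory vrele() to a taskqueue avoids
 * dropping the last reference, and thus re-entering reclamation, from
 * a context that already holds vnode locks:
 *
 *	mtx_lock(&unionfs_deferred_rele_lock);
 *	STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
 *	mtx_unlock(&unionfs_deferred_rele_lock);
 *	taskqueue_enqueue(taskqueue_unionfs_rele,
 *	    &unionfs_deferred_rele_task);
 */
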
static struct unionfs_node_hashhead *
unionfs_get_hashhead(struct vnode *dvp, struct vnode *lookup)
{
	struct unionfs_node *unp;

	unp = VTOUNIONFS(dvp);

	return (&(unp->un_hashtbl[vfs_hash_index(lookup) & UNIONFSHASHMASK]));
}

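/*
 * Bucket selection by example (added commentary): with
 * NUNIONFSNODECACHE == 16 the mask is 0xf, so a child vnode whose
 * vfs_hash_index() is 0x1234 lands in bucket 0x1234 & 0xf == 0x4 of
 * its parent directory's un_hashtbl.
 */
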
/*
 * Attempt to look up a cached unionfs vnode by upper/lower vp
 * from dvp, with dvp's interlock held.
 */
static struct vnode *
unionfs_get_cached_vnode_locked(struct vnode *lookup, struct vnode *dvp)
{
	struct unionfs_node *unp;
	struct unionfs_node_hashhead *hd;
	struct vnode *vp;

	hd = unionfs_get_hashhead(dvp, lookup);

	LIST_FOREACH(unp, hd, un_hash) {
		if (unp->un_uppervp == lookup ||
		    unp->un_lowervp == lookup) {
			vp = UNIONFSTOV(unp);
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			vp->v_iflag &= ~VI_OWEINACT;
			if (VN_IS_DOOMED(vp) ||
			    ((vp->v_iflag & VI_DOINGINACT) != 0)) {
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else {
				vrefl(vp);
				VI_UNLOCK(vp);
			}
			return (vp);
		}
	}

	return (NULLVP);
}


/*
 * Get the cached vnode.
 */
static struct vnode *
unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp)
{
	struct vnode *vp;

	vp = NULLVP;
	VI_LOCK(dvp);
	if (uvp != NULLVP)
		vp = unionfs_get_cached_vnode_locked(uvp, dvp);
	else if (lvp != NULLVP)
		vp = unionfs_get_cached_vnode_locked(lvp, dvp);
	VI_UNLOCK(dvp);

	return (vp);
}

/*
 * Add the new vnode to the cache.
 */
static struct vnode *
unionfs_ins_cached_vnode(struct unionfs_node *uncp,
    struct vnode *dvp)
{
	struct unionfs_node_hashhead *hd;
	struct vnode *vp;

	ASSERT_VOP_ELOCKED(uncp->un_uppervp, __func__);
	ASSERT_VOP_ELOCKED(uncp->un_lowervp, __func__);
	KASSERT(uncp->un_uppervp == NULLVP || uncp->un_uppervp->v_type == VDIR,
	    ("%s: v_type != VDIR", __func__));
	KASSERT(uncp->un_lowervp == NULLVP || uncp->un_lowervp->v_type == VDIR,
	    ("%s: v_type != VDIR", __func__));

	vp = NULLVP;
	VI_LOCK(dvp);
	if (uncp->un_uppervp != NULL)
		vp = unionfs_get_cached_vnode_locked(uncp->un_uppervp, dvp);
	else if (uncp->un_lowervp != NULL)
		vp = unionfs_get_cached_vnode_locked(uncp->un_lowervp, dvp);
	if (vp == NULLVP) {
		hd = unionfs_get_hashhead(dvp, (uncp->un_uppervp != NULLVP ?
		    uncp->un_uppervp : uncp->un_lowervp));
		LIST_INSERT_HEAD(hd, uncp, un_hash);
	}
	VI_UNLOCK(dvp);

	return (vp);
}

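/*
 * Illustrative caller pattern (a sketch; unionfs_nodeget() below is
 * the real user): the insert doubles as a lookup, so a racing thread
 * may already have cached an equivalent vnode.  In that case the
 * non-NULL return value is used and the caller's own node is
 * discarded:
 *
 *	vp = unionfs_ins_cached_vnode(unp, dvp);
 *	if (vp != NULLVP)
 *		unionfs_nodeget_cleanup(UNIONFSTOV(unp), unp);
 */
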
/*
 * Remove the vnode from the cache.
 */
static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
	KASSERT(unp != NULL, ("%s: null node", __func__));
	KASSERT(dvp != NULLVP,
	    ("%s: null parent vnode", __func__));

	VI_LOCK(dvp);
	if (unp->un_hash.le_prev != NULL) {
		LIST_REMOVE(unp, un_hash);
		unp->un_hash.le_next = NULL;
		unp->un_hash.le_prev = NULL;
	}
	VI_UNLOCK(dvp);
}

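/*
 * Note (added commentary): the le_prev != NULL test above treats
 * NULLed link fields as a "not on any list" sentinel.  Both this
 * function and unionfs_noderem() clear the fields after LIST_REMOVE()
 * so that a later removal attempt against the same node is a harmless
 * no-op.
 */
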
/*
 * Common cleanup handling for unionfs_nodeget.
 * Upper, lower, and parent directory vnodes are expected to be referenced by
 * the caller.  Upper and lower vnodes, if non-NULL, are also expected to be
 * exclusively locked by the caller.
 * This function will return with the caller's locks and references undone.
 */
static void
unionfs_nodeget_cleanup(struct vnode *vp, struct unionfs_node *unp)
{

	/*
	 * Lock and reset the default vnode lock; vgone() expects a locked
	 * vnode, and we're going to reset the vnode ops.
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);

	/*
	 * Clear out private data and reset the vnode ops to avoid use of
	 * unionfs vnode ops on a partially constructed vnode.
	 */
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	vp->v_op = &dead_vnodeops;
	VI_UNLOCK(vp);
	vgone(vp);
	vput(vp);

	if (unp->un_dvp != NULLVP)
		vrele(unp->un_dvp);
	if (unp->un_uppervp != NULLVP)
		vput(unp->un_uppervp);
	if (unp->un_lowervp != NULLVP)
		vput(unp->un_lowervp);
	if (unp->un_hashtbl != NULL)
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
	free(unp->un_path, M_UNIONFSPATH);
	free(unp, M_UNIONFSNODE);
}

/*
 * Make a new unionfs node, or get an existing one.
 *
 * uppervp and lowervp should be unlocked on entry.  Locking the new unionfs
 * vnode also locks its uppervp or lowervp, so locking several of these
 * vnodes at once risks deadlock.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
    struct vnode *lowervp, struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	char	       *path;
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode   *vp;
	u_long		hashmask;
	int		error;
	int		lkflags;
	enum vtype	vt;

	error = 0;
	ump = MOUNTTOUNIONFSMOUNT(mp);
	lkflags = (cnp ? cnp->cn_lkflags : 0);
	path = (cnp ? cnp->cn_nameptr : NULL);
	*vpp = NULLVP;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("%s: upper and lower is null", __func__);

	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

	/* Skip the path check unless the ISLASTCN flag is set. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;

	/* check the cache */
	if (dvp != NULLVP && vt == VDIR) {
		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp);
		if (vp != NULLVP) {
			*vpp = vp;
			goto unionfs_nodeget_out;
		}
	}

	unp = malloc(sizeof(struct unionfs_node),
	    M_UNIONFSNODE, M_WAITOK | M_ZERO);

	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	if (vt == VDIR) {
		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
		    &hashmask);
		KASSERT(hashmask == UNIONFSHASHMASK,
		    ("unexpected unionfs hash mask 0x%lx", hashmask));
	}

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;
	if (uppervp != NULLVP)
		vp->v_vnlock = uppervp->v_vnlock;
	else
		vp->v_vnlock = lowervp->v_vnlock;

	if (path != NULL) {
		unp->un_path = malloc(cnp->cn_namelen + 1,
		    M_UNIONFSPATH, M_WAITOK | M_ZERO);
		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
		unp->un_pathlen = cnp->cn_namelen;
	}
	vp->v_type = vt;
	vp->v_data = unp;

	/*
	 * TODO: This is an imperfect check, as there's no guarantee that
	 * the underlying filesystems will always return vnode pointers
	 * for the root inodes that match our cached values.  To reduce
	 * the likelihood of failure, for example in the case where either
	 * vnode has been forcibly doomed, we check both pointers and set
	 * VV_ROOT if either matches.
	 */
	if (ump->um_uppervp == uppervp || ump->um_lowervp == lowervp)
		vp->v_vflag |= VV_ROOT;
	KASSERT(dvp != NULL || (vp->v_vflag & VV_ROOT) != 0,
	    ("%s: NULL dvp for non-root vp %p", __func__, vp));

	vn_lock_pair(lowervp, false, uppervp, false);
	error = insmntque1(vp, mp);
	if (error != 0) {
		unionfs_nodeget_cleanup(vp, unp);
		return (error);
	}
	if (lowervp != NULL && VN_IS_DOOMED(lowervp)) {
		vput(lowervp);
		unp->un_lowervp = NULL;
	}
	if (uppervp != NULL && VN_IS_DOOMED(uppervp)) {
		vput(uppervp);
		unp->un_uppervp = NULL;
	}
	if (unp->un_lowervp == NULL && unp->un_uppervp == NULL) {
		unionfs_nodeget_cleanup(vp, unp);
		return (ENOENT);
	}

	if (dvp != NULLVP && vt == VDIR)
		*vpp = unionfs_ins_cached_vnode(unp, dvp);
	if (*vpp != NULLVP) {
		unionfs_nodeget_cleanup(vp, unp);
		vp = *vpp;
	} else {
		if (uppervp != NULL)
			VOP_UNLOCK(uppervp);
		if (lowervp != NULL)
			VOP_UNLOCK(lowervp);
		*vpp = vp;
	}

unionfs_nodeget_out:
	if (lkflags & LK_TYPE_MASK)
		vn_lock(vp, lkflags | LK_RETRY);

	return (0);
}

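/*
 * Example usage (a hypothetical caller, loosely modeled on the unionfs
 * lookup path; uvp, lvp and dvp are assumed to be referenced but
 * unlocked, as the comment above requires):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = unionfs_nodeget(mp, uvp, lvp, dvp, &vp, cnp);
 *	if (error != 0)
 *		return (error);
 *	(vp is now referenced, locked if cnp->cn_lkflags requested it,
 *	and may be a previously cached node rather than a new one)
 */
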
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp)
{
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;
	int		count;
	int		writerefs;

	/*
	 * The root vnode lock may be recursed during unmount, because
	 * it may share the same lock as the unionfs mount's covered vnode,
	 * which is locked across VFS_UNMOUNT().  This lock will then be
	 * recursively taken during the vflush() issued by unionfs_unmount().
	 * But we still only need to lock the unionfs lock once, because only
	 * one of those lock operations was taken against a unionfs vnode and
	 * will be undone against a unionfs vnode.
	 */
	KASSERT(vp->v_vnlock->lk_recurse == 0 || (vp->v_vflag & VV_ROOT) != 0,
	    ("%s: vnode %p locked recursively", __func__, vp));
	if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		panic("%s: failed to acquire lock for vnode lock", __func__);

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	if (unp->un_hashtbl != NULL) {
		/*
		 * Clear out any cached child vnodes.  This should only
		 * be necessary during forced unmount, when the vnode may
		 * be reclaimed with a non-zero use count.  Otherwise the
		 * reference held by each child should prevent reclamation.
		 */
		for (count = 0; count <= UNIONFSHASHMASK; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
	}
	VI_UNLOCK(vp);

	writerefs = atomic_load_int(&vp->v_writecount);
	VNASSERT(writerefs >= 0, vp,
	    ("%s: write count %d, unexpected text ref", __func__, writerefs));
	/*
	 * If we were opened for write, we leased the write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (writerefs > 0) {
		VNASSERT(uvp != NULL, vp,
		    ("%s: write reference without upper vnode", __func__));
		VOP_ADD_WRITECOUNT(uvp, -writerefs);
	}
	if (lvp != NULLVP)
		VOP_UNLOCK(lvp);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp);

	if (dvp != NULLVP)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
		unp->un_pathlen = 0;
	}

	if (unp->un_hashtbl != NULL) {
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
	}

	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
		LIST_REMOVE(unsp, uns_list);
		free(unsp, M_TEMP);
	}
	if (dvp != NULLVP) {
		mtx_lock(&unionfs_deferred_rele_lock);
		STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
		mtx_unlock(&unionfs_deferred_rele_lock);
		taskqueue_enqueue(taskqueue_unionfs_rele,
		    &unionfs_deferred_rele_task);
	} else
		free(unp, M_UNIONFSNODE);
}

/*
 * Get the unionfs node status object for the vnode corresponding to unp,
 * for the process that owns td.  Allocate a new status object if one
 * does not already exist.
 */
void
unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
    struct unionfs_node_status **unspp)
{
	struct unionfs_node_status *unsp;
	pid_t pid;

	pid = td->td_proc->p_pid;

	KASSERT(NULL != unspp, ("%s: NULL status", __func__));
	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);

	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
		if (unsp->uns_pid == pid) {
			*unspp = unsp;
			return;
		}
	}

	/* create a new unionfs node status */
	unsp = malloc(sizeof(struct unionfs_node_status),
	    M_TEMP, M_WAITOK | M_ZERO);

	unsp->uns_pid = pid;
	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);

	*unspp = unsp;
}

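/*
 * Usage sketch (hedged; the real callers are the open/close paths in
 * unionfs_vnops.c): the status object tracks per-process open counts,
 * and unionfs_tryrem_node_status() below frees it once both counts
 * drop to zero.
 *
 *	struct unionfs_node_status *unsp;
 *
 *	unionfs_get_node_status(unp, td, &unsp);
 *	unsp->uns_upper_opencnt++;
 *	...
 *	unsp->uns_upper_opencnt--;
 *	unionfs_tryrem_node_status(unp, unsp);
 */
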
/*
 * Remove the unionfs node status if it is no longer in use.
 * The unionfs vnode must be exclusively locked.
 */
void
unionfs_tryrem_node_status(struct unionfs_node *unp,
    struct unionfs_node_status *unsp)
{
	KASSERT(NULL != unsp, ("%s: NULL status", __func__));
	ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);

	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
		return;

	LIST_REMOVE(unsp, uns_list);
	free(unsp, M_TEMP);
}

/*
 * Create upper node attributes from the lower node's attributes,
 * honoring the mount's copy mode.
 */
void
unionfs_create_uppervattr_core(struct unionfs_mount *ump, struct vattr *lva,
    struct vattr *uva, struct thread *td)
{
	VATTR_NULL(uva);
	uva->va_type = lva->va_type;
	uva->va_atime = lva->va_atime;
	uva->va_mtime = lva->va_mtime;
	uva->va_ctime = lva->va_ctime;

	switch (ump->um_copymode) {
	case UNIONFS_TRANSPARENT:
		uva->va_mode = lva->va_mode;
		uva->va_uid = lva->va_uid;
		uva->va_gid = lva->va_gid;
		break;
	case UNIONFS_MASQUERADE:
		if (ump->um_uid == lva->va_uid) {
			uva->va_mode = lva->va_mode & 077077;
			uva->va_mode |= (lva->va_type == VDIR ?
			    ump->um_udir : ump->um_ufile) & 0700;
			uva->va_uid = lva->va_uid;
			uva->va_gid = lva->va_gid;
		} else {
			uva->va_mode = (lva->va_type == VDIR ?
			    ump->um_udir : ump->um_ufile);
			uva->va_uid = ump->um_uid;
			uva->va_gid = ump->um_gid;
		}
		break;
	default:		/* UNIONFS_TRADITIONAL */
		uva->va_mode = 0777 & ~td->td_proc->p_pd->pd_cmask;
		uva->va_uid = ump->um_uid;
		uva->va_gid = ump->um_gid;
		break;
	}
}

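/*
 * Worked example for UNIONFS_MASQUERADE (added commentary): for a
 * regular file with lower mode 0644 owned by um_uid, and um_ufile set
 * to 0600, the shadow file gets (0644 & 077077) == 0044 for the
 * group/other bits plus (0600 & 0700) == 0600 for the owner bits,
 * i.e. mode 0644 with ownership preserved.  If the lower owner is not
 * um_uid, the mode is taken wholesale from um_ufile (um_udir for
 * directories) and ownership becomes um_uid:um_gid.
 */
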
/*
 * Create upper node attributes based on the lower vnode's current
 * attributes.
 */
int
unionfs_create_uppervattr(struct unionfs_mount *ump, struct vnode *lvp,
    struct vattr *uva, struct ucred *cred, struct thread *td)
{
	struct vattr	lva;
	int		error;

	if ((error = VOP_GETATTR(lvp, &lva, cred)))
		return (error);

	unionfs_create_uppervattr_core(ump, &lva, uva, td);

	return (error);
}

/*
 * relookup
 *
 * dvp should be locked on entry and will be locked on return.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced vnode. If *vpp == dvp then remember that only one
 * LK_EXCLUSIVE lock is held.
 */
int
unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct componentname *cn, struct thread *td,
    char *path, int pathlen, u_long nameiop)
{
	int error;

	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = path;
	cn->cn_nameiop = nameiop;
	cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
	cn->cn_lkflags = LK_EXCLUSIVE;
	cn->cn_cred = cnp->cn_cred;
	cn->cn_nameptr = cn->cn_pnbuf;

	if (nameiop == DELETE)
		cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
	else if (RENAME == nameiop)
		cn->cn_flags |= (cnp->cn_flags & SAVESTART);
	else if (nameiop == CREATE)
		cn->cn_flags |= NOCACHE;

	vref(dvp);
	VOP_UNLOCK(dvp);

	if ((error = relookup(dvp, vpp, cn))) {
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else
		vrele(dvp);

	KASSERT((cn->cn_flags & HASBUF) != 0,
	    ("%s: HASBUF cleared", __func__));
	KASSERT((cn->cn_flags & SAVENAME) != 0,
	    ("%s: SAVENAME cleared", __func__));
	KASSERT(cn->cn_pnbuf == path, ("%s: cn_pnbuf changed", __func__));

	return (error);
}

/*
 * relookup for CREATE namei operation.
 *
 * dvp is a unionfs vnode.  dvp should be locked.
 *
 * If a copy-up such as unionfs_copyfile() has been performed on behalf of
 * unionfs_link() etc., the information obtained by VOP_LOOKUP() is stale,
 * so a relookup is needed before the link etc. can be created.
 */
int
unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;
	int error;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	KASSERT((cnp->cn_flags & HASBUF) != 0,
	    ("%s called without HASBUF", __func__));
	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
	    cnp->cn_namelen, CREATE);
	if (error)
		return (error);

	if (vp != NULLVP) {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);

		error = EEXIST;
	}

	return (error);
}

/*
 * relookup for DELETE namei operation.
 *
 * dvp is a unionfs vnode.  dvp should be locked.
 */
int
unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;
	int error;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	KASSERT((cnp->cn_flags & HASBUF) != 0,
	    ("%s called without HASBUF", __func__));
	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
	    cnp->cn_namelen, DELETE);
	if (error)
		return (error);

	if (vp == NULLVP)
		error = ENOENT;
	else {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);
	}

	return (error);
}

/*
 * relookup for RENAME namei operation.
 *
 * dvp is a unionfs vnode.  dvp should be locked.
 */
int
unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;
	int error;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	KASSERT((cnp->cn_flags & HASBUF) != 0,
	    ("%s called without HASBUF", __func__));
	error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
	    cnp->cn_namelen, RENAME);
	if (error)
		return (error);

	if (vp != NULLVP) {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);
	}

	return (error);
}

/*
 * Update the unionfs_node.
 *
 * uvp is the new, locked upper vnode.  The unionfs vnode's lock will be
 * exchanged for uvp's lock, and the lower vnode's lock will be released.
 */
static void
unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
    struct thread *td)
{
	struct unionfs_node_hashhead *hd;
	struct vnode   *vp;
	struct vnode   *lvp;
	struct vnode   *dvp;
	unsigned	count, lockrec;

	vp = UNIONFSTOV(unp);
	lvp = unp->un_lowervp;
	ASSERT_VOP_ELOCKED(lvp, __func__);
	ASSERT_VOP_ELOCKED(uvp, __func__);
	dvp = unp->un_dvp;

	VNASSERT(vp->v_writecount == 0, vp,
	    ("%s: non-zero writecount", __func__));
	/*
	 * Update the upper vnode's lock state to match the lower vnode,
	 * and then switch the unionfs vnode's lock to the upper vnode.
	 */
	lockrec = lvp->v_vnlock->lk_recurse;
	for (count = 0; count < lockrec; count++)
		vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
	VI_LOCK(vp);
	unp->un_uppervp = uvp;
	vp->v_vnlock = uvp->v_vnlock;
	VI_UNLOCK(vp);

	/*
	 * Re-cache the unionfs vnode against the upper vnode
	 */
	if (dvp != NULLVP && vp->v_type == VDIR) {
		VI_LOCK(dvp);
		if (unp->un_hash.le_prev != NULL) {
			LIST_REMOVE(unp, un_hash);
			hd = unionfs_get_hashhead(dvp, uvp);
			LIST_INSERT_HEAD(hd, unp, un_hash);
		}
		VI_UNLOCK(unp->un_dvp);
	}
}

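/*
 * Lock-recursion transfer by example (added commentary): if the lower
 * vnode's lock is held recursively, say lk_recurse == 2, the loop in
 * unionfs_node_update() acquires the upper lock two more times on top
 * of the one it already holds, so that after v_vnlock is switched the
 * callers' pending unlocks of the unionfs vnode unwind cleanly.
 */
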
/*
 * Create a new shadow directory.
 *
 * udvp should be locked on entry and will be locked on return.
 *
 * If no error is returned, unp will be updated.
 */
int
unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
    struct unionfs_node *unp, struct componentname *cnp, struct thread *td)
{
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	va;
	struct vattr	lva;
	struct nameidata nd;
	struct mount   *mp;
	struct ucred   *cred;
	struct ucred   *credbk;
	struct uidinfo *rootinfo;
	int		error;

	if (unp->un_uppervp != NULLVP)
		return (EEXIST);

	lvp = unp->un_lowervp;
	uvp = NULLVP;
	credbk = cnp->cn_cred;

	/* Temporarily switch to root credentials. */
	rootinfo = uifind((uid_t)0);
	cred = crdup(cnp->cn_cred);
	/*
	 * The calls to chgproccnt() are needed to compensate for change_ruid()
	 * calling chgproccnt().
	 */
	chgproccnt(cred->cr_ruidinfo, 1, 0);
	change_euid(cred, rootinfo);
	change_ruid(cred, rootinfo);
	change_svuid(cred, (uid_t)0);
	uifree(rootinfo);
	cnp->cn_cred = cred;

	memset(&nd.ni_cnd, 0, sizeof(struct componentname));
	NDPREINIT(&nd);

	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
		goto unionfs_mkshadowdir_abort;

	if ((error = unionfs_relookup(udvp, &uvp, cnp, &nd.ni_cnd, td,
	    cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
		goto unionfs_mkshadowdir_abort;
	if (uvp != NULLVP) {
		if (udvp == uvp)
			vrele(uvp);
		else
			vput(uvp);

		error = EEXIST;
		goto unionfs_mkshadowdir_abort;
	}

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)))
		goto unionfs_mkshadowdir_abort;
	unionfs_create_uppervattr_core(ump, &lva, &va, td);

	error = VOP_MKDIR(udvp, &uvp, &nd.ni_cnd, &va);

	if (!error) {
		unionfs_node_update(unp, uvp, td);

		/*
		 * XXX: Reset the attributes to set the uid/gid, which
		 * VOP_MKDIR() could not do.  Ignore errors.
		 */
		va.va_type = VNON;
		VOP_SETATTR(uvp, &va, nd.ni_cnd.cn_cred);
	}
	vn_finished_write(mp);

unionfs_mkshadowdir_abort:
	cnp->cn_cred = credbk;
	chgproccnt(cred->cr_ruidinfo, -1, 0);
	crfree(cred);

	return (error);
}

/*
 * Create a new whiteout.
 *
 * dvp should be locked on entry and will be locked on return.
 */
int
unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
    struct thread *td, char *path, int pathlen)
{
	struct vnode   *wvp;
	struct nameidata nd;
	struct mount   *mp;
	int		error;

	wvp = NULLVP;
	NDPREINIT(&nd);
	if ((error = unionfs_relookup(dvp, &wvp, cnp, &nd.ni_cnd, td, path,
	    pathlen, CREATE))) {
		return (error);
	}
	if (wvp != NULLVP) {
		if (dvp == wvp)
			vrele(wvp);
		else
			vput(wvp);

		return (EEXIST);
	}

	if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)))
		goto unionfs_mkwhiteout_free_out;
	error = VOP_WHITEOUT(dvp, &nd.ni_cnd, CREATE);

	vn_finished_write(mp);

unionfs_mkwhiteout_free_out:
	return (error);
}

/*
 * Create a new vnode for a new shadow file.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced and opened vnode.
 *
 * unp is never updated.
 */
static int
unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
    struct unionfs_node *unp, struct vattr *uvap, struct thread *td)
{
	struct unionfs_mount *ump;
	struct vnode   *vp;
	struct vnode   *lvp;
	struct ucred   *cred;
	struct vattr	lva;
	struct nameidata nd;
	int		fmode;
	int		error;

	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
	vp = NULLVP;
	lvp = unp->un_lowervp;
	cred = td->td_ucred;
	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
	error = 0;

	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
		return (error);
	unionfs_create_uppervattr_core(ump, &lva, uvap, td);

	if (unp->un_path == NULL)
		panic("%s: NULL un_path", __func__);

	nd.ni_cnd.cn_namelen = unp->un_pathlen;
	nd.ni_cnd.cn_pnbuf = unp->un_path;
	nd.ni_cnd.cn_nameiop = CREATE;
	nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME |
	    ISLASTCN;
	nd.ni_cnd.cn_lkflags = LK_EXCLUSIVE;
	nd.ni_cnd.cn_cred = cred;
	nd.ni_cnd.cn_nameptr = nd.ni_cnd.cn_pnbuf;
	NDPREINIT(&nd);

	vref(udvp);
	if ((error = relookup(udvp, &vp, &nd.ni_cnd)) != 0)
		goto unionfs_vn_create_on_upper_free_out2;
	vrele(udvp);

	if (vp != NULLVP) {
		if (vp == udvp)
			vrele(vp);
		else
			vput(vp);
		error = EEXIST;
		goto unionfs_vn_create_on_upper_free_out1;
	}

	if ((error = VOP_CREATE(udvp, &vp, &nd.ni_cnd, uvap)) != 0)
		goto unionfs_vn_create_on_upper_free_out1;

	if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
		vput(vp);
		goto unionfs_vn_create_on_upper_free_out1;
	}
	error = VOP_ADD_WRITECOUNT(vp, 1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
	    __func__, vp, vp->v_writecount);
	if (error == 0) {
		*vpp = vp;
	} else {
		VOP_CLOSE(vp, fmode, cred, td);
	}

unionfs_vn_create_on_upper_free_out1:
	VOP_UNLOCK(udvp);

unionfs_vn_create_on_upper_free_out2:
	KASSERT((nd.ni_cnd.cn_flags & HASBUF) != 0,
	    ("%s: HASBUF cleared", __func__));
	KASSERT((nd.ni_cnd.cn_flags & SAVENAME) != 0,
	    ("%s: SAVENAME cleared", __func__));
	KASSERT(nd.ni_cnd.cn_pnbuf == unp->un_path,
	    ("%s: cn_pnbuf changed", __func__));

	return (error);
}

/*
 * Copy from lvp to uvp.
 *
 * lvp and uvp should be locked and opened on entry and will be locked and
 * opened on return.
 */
static int
unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
    struct ucred *cred, struct thread *td)
{
	char           *buf;
	struct uio	uio;
	struct iovec	iov;
	off_t		offset;
	int		count;
	int		error;
	int		bufoffset;

	error = 0;
	memset(&uio, 0, sizeof(uio));

	uio.uio_td = td;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	while (error == 0) {
		offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;

		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
			break;
		if ((count = MAXBSIZE - uio.uio_resid) == 0)
			break;

		bufoffset = 0;
		while (bufoffset < count) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf + bufoffset;
			iov.iov_len = count - bufoffset;
			uio.uio_offset = offset + bufoffset;
			uio.uio_resid = iov.iov_len;
			uio.uio_rw = UIO_WRITE;

			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
				break;

			bufoffset += (count - bufoffset) - uio.uio_resid;
		}

		uio.uio_offset = offset + bufoffset;
	}

	free(buf, M_TEMP);

	return (error);
}

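/*
 * Short-write accounting in unionfs_copyfile_core(), by example (added
 * commentary): with count == 8192 and a VOP_WRITE() that consumes only
 * 4096 bytes, uio_resid is left at 4096, so bufoffset advances by
 * (8192 - 0) - 4096 == 4096 and the inner loop retries the remaining
 * half of the buffer at the corresponding file offset.
 */
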
/*
 * Copy a file from the lower layer to the upper layer.
 *
 * Set docopy to 1 to copy the file contents as well; set it to 0 to create
 * the upper file without copying any data.
 *
 * If no error is returned, unp will be updated.
 */
int
unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
    struct thread *td)
{
	struct mount   *mp;
	struct vnode   *udvp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vattr	uva;
	int		error;

	lvp = unp->un_lowervp;
	uvp = NULLVP;

	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (unp->un_dvp == NULLVP)
		return (EINVAL);
	if (unp->un_uppervp != NULLVP)
		return (EEXIST);
	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
	if (udvp == NULLVP)
		return (EROFS);
	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	error = VOP_ACCESS(lvp, VREAD, cred, td);
	if (error != 0)
		return (error);

	if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
	if (error != 0) {
		vn_finished_write(mp);
		return (error);
	}

	if (docopy != 0) {
		error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
		if (error == 0) {
			error = unionfs_copyfile_core(lvp, uvp, cred, td);
			VOP_CLOSE(lvp, FREAD, cred, td);
		}
	}
	VOP_CLOSE(uvp, FWRITE, cred, td);
	VOP_ADD_WRITECOUNT_CHECKED(uvp, -1);
	CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
	    __func__, uvp, uvp->v_writecount);

	vn_finished_write(mp);

	if (error == 0) {
		/* Reset the attributes. Ignore errors. */
		uva.va_type = VNON;
		VOP_SETATTR(uvp, &uva, cred);
	}

	unionfs_node_update(unp, uvp, td);

	return (error);
}

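/*
 * Caller sketch (hypothetical; the real copy-up callers live in
 * unionfs_vnops.c): a write-like operation triggers copy-up only when
 * no upper vnode exists yet.
 *
 *	if (unp->un_uppervp == NULLVP) {
 *		error = unionfs_copyfile(unp, 1, td->td_ucred, td);
 *		if (error != 0)
 *			return (error);
 *	}
 */
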
/*
 * Check whether vp may be removed by rmdir, i.e. whether the union
 * directory is effectively empty.
 *
 * vp is a unionfs vnode.
 * vp should be locked.
 */
int
unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct vnode   *uvp;
	struct vnode   *lvp;
	struct vnode   *tvp;
	struct dirent  *dp;
	struct dirent  *edp;
	struct componentname cn;
	struct iovec	iov;
	struct uio	uio;
	struct vattr	va;
	int		error;
	int		eofflag;
	int		lookuperr;

	/*
	 * The size of buf needs to be larger than DIRBLKSIZ.
	 */
	char		buf[256 * 6];

	ASSERT_VOP_ELOCKED(vp, __func__);

	eofflag = 0;
	uvp = UNIONFSVPTOUPPERVP(vp);
	lvp = UNIONFSVPTOLOWERVP(vp);

	/* check opaque */
	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
		return (error);
	if (va.va_flags & OPAQUE)
		return (0);

	/* open vnode */
#ifdef MAC
	if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
		return (error);
#endif
	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
		return (error);
	if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
		return (error);

	uio.uio_rw = UIO_READ;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = td;
	uio.uio_offset = 0;

#ifdef MAC
	error = mac_vnode_check_readdir(td->td_ucred, lvp);
#endif
	while (!error && !eofflag) {
		iov.iov_base = buf;
		iov.iov_len = sizeof(buf);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = iov.iov_len;

		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
		if (error != 0)
			break;
		KASSERT(eofflag != 0 || uio.uio_resid < sizeof(buf),
		    ("%s: empty read from lower FS", __func__));

		edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
		for (dp = (struct dirent*)buf; !error && dp < edp;
		     dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
			if (dp->d_type == DT_WHT || dp->d_fileno == 0 ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
				continue;

			cn.cn_namelen = dp->d_namlen;
			cn.cn_pnbuf = NULL;
			cn.cn_nameptr = dp->d_name;
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			cn.cn_lkflags = LK_EXCLUSIVE;
			cn.cn_cred = cred;

			/*
			 * Check that the entry exists in the lower layer;
			 * readdir sometimes returns stale entries.
			 */
			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);
			else
				continue; /* skip entry */

			/*
			 * Check the entry in the upper layer.  If it neither
			 * exists nor is whited out there, the directory is
			 * not empty.
			 */
			cn.cn_flags = LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN;
			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);

			/* Ignore entries that exist or are whited out. */
			if (!lookuperr ||
			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
				continue;

			error = ENOTEMPTY;
		}
	}

	/* close vnode */
	VOP_CLOSE(vp, FREAD, cred, td);

	return (error);
}

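/*
 * Caller sketch (hypothetical; the real check sits in the unionfs
 * rmdir path in unionfs_vnops.c): a union directory that exists in
 * both layers may be removed only if the lower layer contributes no
 * visible entries.
 *
 *	if (uvp != NULLVP && lvp != NULLVP) {
 *		error = unionfs_check_rmdir(vp, cnp->cn_cred, td);
 *		if (error != 0)
 *			return (error);
 *	}
 */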
1337