/*	$NetBSD: layer_vnops.c,v 1.72 2021/10/20 03:08:18 thorpej Exp $	*/

/*
 * Copyright (c) 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * This software was written by William Studenmund of the
 * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the National Aeronautics & Space Administration
 *    nor the names of its contributors may be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NATIONAL AERONAUTICS & SPACE ADMINISTRATION
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE ADMINISTRATION OR CONTRIB-
 * UTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	Id: lofs_vnops.c,v 1.11 1992/05/30 10:05:43 jsp Exp jsp
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 */

/*
 * Generic layer vnode operations.
 *
 * The layer.h, layer_extern.h, layer_vfs.c, and layer_vnops.c files provide
 * the core implementation of stacked file-systems.
 *
 * The layerfs duplicates a portion of the file system name space under
 * a new name.  In this respect, it is similar to the loopback file system.
 * It differs from the loopback fs in two respects: it is implemented using
 * a stackable layers technique, and its "layerfs-nodes" stack above all
 * lower-layer vnodes, not just over directory vnodes.
 *
 * OPERATION OF LAYERFS
 *
 * The layerfs is the minimal file system layer, bypassing all possible
 * operations to the lower layer for processing there.  The majority of its
 * activity centers on the bypass routine, through which nearly all vnode
 * operations pass.
 *
 * The bypass routine accepts arbitrary vnode operations for handling by
 * the lower layer.  It begins by examining vnode operation arguments and
 * replacing any layered nodes by their lower-layer equivalents.  It then
 * invokes an operation on the lower layer.  Finally, it replaces the
 * layered nodes in the arguments and, if a vnode is returned by the
 * operation, stacks a layered node on top of the returned vnode.
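 *
 * As a minimal sketch of that flow, for a hypothetical operation with a
 * single vnode argument (the real layer_bypass() below handles any number
 * of vnode arguments, plus reference-count side effects, generically):
 *
 *	struct vop_fsync_args *ap = v;
 *	struct vnode *ovp = ap->a_vp;
 *	ap->a_vp = LAYERVPTOLOWERVP(ovp);
 *	error = VCALL(ap->a_vp, ap->a_desc->vdesc_offset, ap);
 *	ap->a_vp = ovp;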
 *
 * The bypass routine in this file, layer_bypass(), is suitable for use
 * by many different layered filesystems.  It can be used by multiple
 * filesystems simultaneously.  Alternatively, a layered fs may provide
 * its own bypass routine, in which case layer_bypass() should be used as
 * a model.  For instance, the main functionality provided by umapfs, the
 * user identity mapping file system, is handled by a custom bypass routine.
 *
 * Typically a layered fs registers its selected bypass routine as the
 * default vnode operation in its vnodeopv_entry_desc table.  Additionally
 * the filesystem must store the bypass entry point in the layerm_bypass
 * field of struct layer_mount.  All other layer routines in this file will
 * use the layerm_bypass() routine.
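 *
 * As an illustrative sketch (modeled on the existing null layer; the
 * examplefs_* names here are hypothetical), such a registration might
 * look like:
 *
 *	int (**examplefs_vnodeop_p)(void *);
 *	const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
 *		{ &vop_default_desc, layer_bypass },
 *		{ &vop_getattr_desc, layer_getattr },
 *		{ &vop_inactive_desc, layer_inactive },
 *		{ &vop_reclaim_desc, layer_reclaim },
 *		{ NULL, NULL }
 *	};
 *	const struct vnodeopv_desc examplefs_vnodeop_opv_desc =
 *		{ &examplefs_vnodeop_p, examplefs_vnodeop_entries };
 *
 * with the mount code also setting lmp->layerm_bypass = layer_bypass
 * when it initializes its struct layer_mount.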
 *
 * Although the bypass routine handles most operations outright, a number
 * of operations are special cased and handled by the layerfs.  For instance,
 * layer_getattr() must change the fsid being returned, while layer_lock()
 * and layer_unlock() must handle any locking for the current vnode as well
 * as pass the lock request down.  layer_inactive() and layer_reclaim() are
 * not bypassed so that they can handle freeing layerfs-specific data.  Also,
 * certain vnode operations (create, mknod, remove, link, rename, mkdir,
 * rmdir, and symlink) change the locking state within the operation.  Ideally
 * these operations should not change the lock state, but should be changed
 * to let the caller of the function unlock them.  Otherwise, all intermediate
 * vnode layers (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates a "layerfs-nodes" stack with a lower layer, in effect
 * stacking two VFSes.  The initial mount creates a single vnode stack for
 * the root of the new layerfs.  All other vnode stacks are created as a
 * result of vnode operations on this or other layerfs vnode stacks.
 *
 * New vnode stacks come into existence as a result of an operation which
 * returns a vnode.  The bypass routine stacks a layerfs-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with:
 *
 *	"mount_null /usr/include /dev/layer/null"
 *
 * Changing directory to /dev/layer/null will assign the root layerfs-node,
 * which was created when the null layer was mounted.  Now consider opening
 * "sys".  A layer_lookup() would be performed on the root layerfs-node.
 * This operation would bypass through to the lower layer which would return
 * a vnode representing the UFS "sys".  Then, layer_bypass() builds a
 * layerfs-node aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the layerfs-node "sys" will repeat this process when
 * constructing other vnode stacks.
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer when the
 * operation cannot be completely bypassed.  Each method is appropriate in
 * different situations.  In both cases, it is the responsibility of the
 * aliasing layer to make the operation arguments "correct" for the lower
 * layer by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.  This
 * method is most suitable when you wish to invoke the operation currently
 * being handled on the lower layer.  It has the advantage that the bypass
 * routine already must do argument mapping.  An example of this is
 * layer_getattr().
 *
 * A second approach is to directly invoke vnode operations on the lower
 * layer with the VOP_OPERATIONNAME interface.  The advantage of this method
 * is that it is easy to invoke arbitrary operations on the lower layer.
 * The disadvantage is that vnode arguments must be manually mapped.
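 *
 * Sketched side by side (vp is the layer vnode and ap the argument
 * structure of the operation being handled; both macros come from
 * layer.h), the two approaches look like:
 *
 *	error = LAYERFS_DO_BYPASS(vp, ap);
 *
 * versus, for one specific operation on the lower vnode:
 *
 *	struct vnode *lvp = LAYERVPTOLOWERVP(vp);
 *	error = VOP_FSYNC(lvp, cred, FSYNC_WAIT, 0, 0);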
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: layer_vnops.c,v 1.72 2021/10/20 03:08:18 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/kauth.h>
#include <sys/fcntl.h>
#include <sys/fstrans.h>

#include <miscfs/genfs/layer.h>
#include <miscfs/genfs/layer_extern.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/*
 * This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
 * routine by John Heidemann.  The new element in this version is that
 * the whole nullfs system gained the concept of locks on the lower node.
 * The 10-Apr-92 version was optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as robust
 * to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * New for the 08-June-99 version: we also handle operations which unlock
 * the passed-in node (typically they vput the node).
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
layer_bypass(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct mount *mp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	mp = vp0->v_mount;
	flags = MOUNTTOLAYERMOUNT(mp)->layerm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = LAYERVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_mknod
		 * and vop_symlink return vpp's.  vop_lookup doesn't call bypass
		 * as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have an unlocked vpp. :-)
		 */
		error = layer_node_create(mp, **vppp, *vppp);
		if (error) {
			vrele(**vppp);
			**vppp = NULL;
		}
	}
	return error;
}

/*
 * We have to carry on the locking protocol on the layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
int
layer_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp, *lvp, *ldvp;
	int error, flags = cnp->cn_flags;

	dvp = ap->a_dvp;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		*ap->a_vpp = NULL;
		return EROFS;
	}

	ldvp = LAYERVPTOLOWERVP(dvp);
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	lvp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.
	 */
	if (ldvp == lvp) {
		/*
		 * Got the same object back, because we looked up ".",
		 * or ".." in the root node of a mount point.
		 * So we make another reference to dvp and return it.
		 */
		vref(dvp);
		*ap->a_vpp = dvp;
		vrele(lvp);
	} else if (lvp != NULL) {
		/* Note: dvp and ldvp are both locked. */
		KASSERT(error != ENOLCK);
		error = layer_node_create(dvp->v_mount, lvp, ap->a_vpp);
		if (error) {
			vrele(lvp);
		}
	}
	return error;
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
int
layer_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return EROFS;
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return EISDIR;
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return 0;
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return EROFS;
		}
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We handle getattr only to change the fsid.
 */
int
layer_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error) {
		return error;
	}
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	return 0;
}

int
layer_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t  a_accmode;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers, unless the file
	 * is a socket, fifo, or a block or character device resident
	 * on the file system.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return EROFS;
			break;
		default:
			break;
		}
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends
 * and increment the lower v_writecount.
 */
int
layer_open(void *v)
{
	struct vop_open_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);
	int error;

	if (((lvp->v_type == VBLK) || (lvp->v_type == VCHR)) &&
	    (vp->v_mount->mnt_flag & MNT_NODEV))
		return ENXIO;

	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0 && (ap->a_mode & FWRITE)) {
		mutex_enter(lvp->v_interlock);
		lvp->v_writecount++;
		mutex_exit(lvp->v_interlock);
	}
	return error;
}

/*
 * We must handle close to decrement the lower v_writecount.
 */
int
layer_close(void *v)
{
	struct vop_close_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);

	if ((ap->a_fflag & FWRITE)) {
		mutex_enter(lvp->v_interlock);
		KASSERT(lvp->v_writecount > 0);
		lvp->v_writecount--;
		mutex_exit(lvp->v_interlock);
	}
	return LAYERFS_DO_BYPASS(vp, ap);
}

/*
 * If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
 * syncing the underlying vnodes, since they'll be fsync'ed when
 * reclaimed; otherwise, pass it through to the underlying layer.
 *
 * XXX Do we still need to worry about shallow fsync?
 */
int
layer_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct lwp *a_l;
	} */ *ap = v;
	int error;

	if (ap->a_flags & FSYNC_RECLAIM) {
		return 0;
	}
	if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) {
		error = spec_fsync(v);
		if (error)
			return error;
	}
	return LAYERFS_DO_BYPASS(ap->a_vp, ap);
}

int
layer_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/*
	 * If we did a remove, don't cache the node.
	 */
	*ap->a_recycle = ((VTOLAYER(vp)->layer_flags & LAYERFS_REMOVED) != 0);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our layer_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	return 0;
}

int
layer_remove(void *v)
{
	struct vop_remove_v3_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
		nlink_t			 ctx_vp_new_nlink;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	vref(vp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0) {
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	}
	vrele(vp);

	return error;
}

int
layer_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode		*a_fdvp;
		struct vnode		*a_fvp;
		struct componentname	*a_fcnp;
		struct vnode		*a_tdvp;
		struct vnode		*a_tvp;
		struct componentname	*a_tcnp;
	} */ *ap = v;
	struct vnode *fdvp = ap->a_fdvp, *tvp;
	int error;

	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != fdvp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = LAYERFS_DO_BYPASS(fdvp, ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}
	return error;
}

int
layer_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode		*a_dvp;
		struct vnode		*a_vp;
		struct componentname	*a_cnp;
	} */ *ap = v;
	int		error;
	struct vnode	*vp = ap->a_vp;

	vref(vp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	if (error == 0) {
		VTOLAYER(vp)->layer_flags |= LAYERFS_REMOVED;
	}
	vrele(vp);

	return error;
}

int
layer_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = LAYERVPTOLOWERVP(vp);
	int error;

	/*
	 * We will most likely end up in vclean which uses the usecount
	 * to determine if a vnode is active.  Take an extra reference on
	 * the lower vnode so it will always close and inactivate.
	 */
	vref(lvp);
	error = LAYERFS_DO_BYPASS(vp, ap);
	vrele(lvp);

	return error;
}

int
layer_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(vp->v_mount);
	struct layer_node *xp = VTOLAYER(vp);
	struct vnode *lowervp = xp->layer_lowervp;

	VOP_UNLOCK(vp);

	/*
	 * Note: in vop_reclaim, the node's struct lock has been
	 * decommissioned, so we have to be careful about calling
	 * VOP's on ourself.  We must be careful as VXLOCK is set.
	 */
	if (vp == lmp->layerm_rootvp) {
		/*
		 * Oops! We no longer have a root node.  The most likely
		 * reason is that someone forcibly unmounted the
		 * underlying fs.
		 *
		 * Now getting the root vnode will fail.  We're dead. :-(
		 */
		lmp->layerm_rootvp = NULL;
	}

	mutex_enter(vp->v_interlock);
	KASSERT(vp->v_interlock == lowervp->v_interlock);
	lowervp->v_writecount -= vp->v_writecount;
	mutex_exit(vp->v_interlock);

	/* After this assignment, this node will not be re-used. */
	xp->layer_lowervp = NULL;
	kmem_free(vp->v_data, lmp->layerm_size);
	vp->v_data = NULL;
	vrele(lowervp);

	return 0;
}

/*
 * We just feed the returned vnode up to the caller - there's no need
 * to build a layer node on top of the node on which we're going to do
 * i/o. :-)
 */
int
layer_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;
	struct vnode *vp;

	vp = LAYERVPTOLOWERVP(ap->a_vp);
	ap->a_vp = vp;

	return VCALL(vp, ap->a_desc->vdesc_offset, ap);
}

int
layer_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_LAYERFS, vp=%p, lowervp=%p\n", vp, LAYERVPTOLOWERVP(vp));
	return 0;
}

int
layer_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mount *mp = vp->v_mount;
	int error;
	krw_t op;

	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	ap->a_vp = LAYERVPTOLOWERVP(vp);
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	/* Just pass the request on to the underlying layer. */
	op = rw_lock_op(vp->v_uobj.vmobjlock);
	rw_exit(vp->v_uobj.vmobjlock);
	fstrans_start(mp);
	rw_enter(vp->v_uobj.vmobjlock, op);
	if (mp == vp->v_mount) {
		/* Will release the lock. */
		error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
	} else {
		rw_exit(vp->v_uobj.vmobjlock);
		error = ENOENT;
	}
	fstrans_done(mp);

	return error;
}

int
layer_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

	ap->a_vp = LAYERVPTOLOWERVP(vp);
	KASSERT(vp->v_uobj.vmobjlock == ap->a_vp->v_uobj.vmobjlock);

	if (ap->a_flags & PGO_RECLAIM) {
		rw_exit(vp->v_uobj.vmobjlock);
		return 0;
	}

	/* Just pass the request on to the underlying layer. */
	return VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
}