xref: /freebsd/sys/geom/geom_vfs.c (revision e521fb05)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bio.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/vnode.h>
38 #include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT */
39 
40 #include <geom/geom.h>
41 #include <geom/geom_vfs.h>
42 
/*
 * Subroutines for use by filesystems: glue between the buffer cache
 * (bufobj/buf) and GEOM consumers.
 *
 * XXX: should maybe live somewhere else?
 */
48 #include <sys/buf.h>
49 
/*
 * Per-geom private state.  sc_mtx protects sc_active and sc_orphaned;
 * sc_bo is set once in g_vfs_open() and never changes afterwards.
 */
struct g_vfs_softc {
	struct mtx	 sc_mtx;	/* protects sc_active, sc_orphaned */
	struct bufobj	*sc_bo;		/* bufobj we do I/O for */
	int		 sc_active;	/* number of bios in flight */
	int		 sc_orphaned;	/* provider has orphaned us */
};
56 
/*
 * Buffer operations installed on a bufobj by g_vfs_open().  Only the
 * strategy method is GEOM-specific; the others are the stock buf(9)
 * implementations.
 */
static struct buf_ops __g_vfs_bufops = {
	.bop_name =	"GEOM_VFS",
	.bop_write =	bufwrite,
	.bop_strategy =	g_vfs_strategy,
	.bop_sync =	bufsync,
	.bop_bdflush =	bufbdflush
};

/* Exported handle so filesystems can point bo_ops at the table above. */
struct buf_ops *g_vfs_bufops = &__g_vfs_bufops;
66 
static g_orphan_t g_vfs_orphan;

/* GEOM class for the consumers created on behalf of filesystems. */
static struct g_class g_vfs_class = {
	.name =		"VFS",
	.version =	G_VERSION,
	.orphan =	g_vfs_orphan,
};

DECLARE_GEOM_CLASS(g_vfs_class, g_vfs);
76 
77 static void
78 g_vfs_destroy(void *arg, int flags __unused)
79 {
80 	struct g_consumer *cp;
81 
82 	g_topology_assert();
83 	cp = arg;
84 	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
85 		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
86 	g_detach(cp);
87 	if (cp->geom->softc == NULL)
88 		g_wither_geom(cp->geom, ENXIO);
89 }
90 
/*
 * bio completion handler, run by the g_up thread.  Copies the bio's
 * outcome back into the originating buf, updates per-mount I/O
 * statistics and, if the consumer has been orphaned and this was the
 * last bio in flight, schedules the final teardown.
 */
static void
g_vfs_done(struct bio *bip)
{
	struct g_consumer *cp;
	struct g_vfs_softc *sc;
	struct buf *bp;
	int vfslocked, destroy;
	struct mount *mp;
	struct vnode *vp;
	struct cdev *cdevp;

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 * Since this is run by the g_up thread it is single threaded and
	 * we do not need to use atomic increments on the counters.
	 */
	bp = bip->bio_caller2;
	vp = bp->b_vp;
	if (vp == NULL) {
		mp = NULL;
	} else {
		/*
		 * If not a disk vnode, use its associated mount point
		 * otherwise use the mountpoint associated with the disk.
		 */
		VI_LOCK(vp);
		if (vp->v_type != VCHR ||
		    (cdevp = vp->v_rdev) == NULL ||
		    cdevp->si_devsw == NULL ||
		    (cdevp->si_devsw->d_flags & D_DISK) == 0)
			mp = vp->v_mount;
		else
			mp = cdevp->si_mountpt;
		VI_UNLOCK(vp);
	}
	if (mp != NULL) {
		if (bp->b_iocmd == BIO_WRITE) {
			/*
			 * A buf lock handed off to LK_KERNPROC marks an
			 * async request; see BUF_KERNPROC() in buf(9).
			 */
			if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
				mp->mnt_stat.f_asyncwrites++;
			else
				mp->mnt_stat.f_syncwrites++;
		} else {
			if (LK_HOLDER(bp->b_lock.lk_lock) == LK_KERNPROC)
				mp->mnt_stat.f_asyncreads++;
			else
				mp->mnt_stat.f_syncreads++;
		}
	}

	cp = bip->bio_from;
	sc = cp->geom->softc;
	if (bip->bio_error) {
		printf("g_vfs_done():");
		g_print_bio(bip);
		printf("error = %d\n", bip->bio_error);
	}
	/* Propagate the bio's result into the buf before releasing it. */
	bp->b_error = bip->bio_error;
	bp->b_ioflags = bip->bio_flags;
	if (bip->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bip->bio_completed;
	g_destroy_bio(bip);

	mtx_lock(&sc->sc_mtx);
	/* Last bio in flight on an orphaned consumer: finish teardown. */
	destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_vfs_destroy, cp, M_WAITOK, NULL);

	vfslocked = VFS_LOCK_GIANT(((struct mount *)NULL));
	bufdone(bp);
	VFS_UNLOCK_GIANT(vfslocked);
}
165 
/*
 * bop_strategy implementation: translate a struct buf into a struct bio
 * and issue it on the consumer installed by g_vfs_open().
 */
void
g_vfs_strategy(struct bufobj *bo, struct buf *bp)
{
	struct g_vfs_softc *sc;
	struct g_consumer *cp;
	struct bio *bip;
	int vfslocked;

	cp = bo->bo_private;
	sc = cp->geom->softc;

	/*
	 * If the provider has orphaned us, just return ENXIO.
	 */
	mtx_lock(&sc->sc_mtx);
	if (sc->sc_orphaned) {
		mtx_unlock(&sc->sc_mtx);
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		vfslocked = VFS_LOCK_GIANT(((struct mount *)NULL));
		bufdone(bp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	/* Count the request so a later orphan waits for it to drain. */
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	bip = g_alloc_bio();
	bip->bio_cmd = bp->b_iocmd;
	bip->bio_offset = bp->b_iooffset;
	bip->bio_data = bp->b_data;
	bip->bio_done = g_vfs_done;
	bip->bio_caller2 = bp;		/* stashed for g_vfs_done() */
	bip->bio_length = bp->b_bcount;
	g_io_request(bip, cp);
}
202 
/*
 * Orphan method: the provider is going away.  Mark the softc orphaned
 * so g_vfs_strategy() refuses new I/O; if nothing is in flight, detach
 * immediately, otherwise g_vfs_done() will post g_vfs_destroy() when
 * the last bio completes.
 */
static void
g_vfs_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_vfs_softc *sc;
	int destroy;

	g_topology_assert();

	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
	sc = gp->softc;
	if (sc == NULL)		/* already closed by the filesystem */
		return;
	mtx_lock(&sc->sc_mtx);
	sc->sc_orphaned = 1;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_vfs_destroy(cp, 0);

	/*
	 * Do not destroy the geom.  Filesystem will do that during unmount.
	 */
}
228 
229 int
230 g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr)
231 {
232 	struct g_geom *gp;
233 	struct g_provider *pp;
234 	struct g_consumer *cp;
235 	struct g_vfs_softc *sc;
236 	struct bufobj *bo;
237 	int vfslocked;
238 	int error;
239 
240 	g_topology_assert();
241 
242 	*cpp = NULL;
243 	bo = &vp->v_bufobj;
244 	if (bo->bo_private != vp)
245 		return (EBUSY);
246 
247 	pp = g_dev_getprovider(vp->v_rdev);
248 	if (pp == NULL)
249 		return (ENOENT);
250 	gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
251 	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
252 	mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
253 	sc->sc_bo = bo;
254 	gp->softc = sc;
255 	cp = g_new_consumer(gp);
256 	g_attach(cp, pp);
257 	error = g_access(cp, 1, wr, wr);
258 	if (error) {
259 		g_wither_geom(gp, ENXIO);
260 		return (error);
261 	}
262 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
263 	vnode_create_vobject(vp, pp->mediasize, curthread);
264 	VFS_UNLOCK_GIANT(vfslocked);
265 	*cpp = cp;
266 	cp->private = vp;
267 	bo->bo_ops = g_vfs_bufops;
268 	bo->bo_private = cp;
269 	bo->bo_bsize = pp->sectorsize;
270 
271 	return (error);
272 }
273 
/*
 * Close a consumer set up by g_vfs_open(): flush and invalidate the
 * bufobj, hand it back to the vnode and release our references.  Must
 * be called with the topology lock held.
 */
void
g_vfs_close(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_vfs_softc *sc;

	g_topology_assert();

	gp = cp->geom;
	sc = gp->softc;
	/* V_SAVE: write dirty buffers back before invalidating them. */
	bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
	sc->sc_bo->bo_private = cp->private;	/* restore vnode as owner */
	gp->softc = NULL;	/* lets g_vfs_destroy() wither the geom */
	mtx_destroy(&sc->sc_mtx);
	/*
	 * If we were orphaned and are still attached, a pending
	 * g_vfs_destroy() event will detach and wither the geom once it
	 * runs (softc is now NULL); otherwise finish the teardown here.
	 */
	if (!sc->sc_orphaned || cp->provider == NULL)
		g_wither_geom_close(gp, ENXIO);
	g_free(sc);
}
292