/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

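/*
 * Destroy the given unit: drop our units-table reference, cancel all
 * queued bio requests, wait for the remaining references to go away
 * and wither the provider.  Called with g_gate_units_lock held; the
 * lock is dropped before returning.
 */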
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	G_GATE_DEBUG(0, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

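/*
 * GEOM access method: refuse new openings once the unit is being
 * destroyed and enforce the write-only flag.  The read-only check is
 * disabled here so that read-only mounts still work; writes to
 * read-only units are rejected in g_gate_start() instead.
 */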
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

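/*
 * GEOM start method: validate the request, tag it with a sequence
 * number and queue it on the incoming queue for the userland process
 * to pick up via G_GATE_CMD_START.
 */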
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);
	if (sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

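/*
 * Look up a unit by number, or by provider name if unit is
 * G_GATE_NAME_GIVEN, and bump its reference count.  Returns NULL if
 * no matching unit exists.
 */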
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

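/*
 * Drop a reference obtained with g_gate_hold() and wake up
 * g_gate_destroy() if it is waiting for the last reference to go
 * away.
 */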
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

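/*
 * Allocate a unit number: validate the requested unit, or pick the
 * first free one if the caller passed a negative unit.  Returns -1
 * and sets *errorp on failure.
 */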
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

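/*
 * Watchdog callout: fail with EIO any request that has been sitting
 * on the queues for five seconds or more, then reschedule itself
 * every sc_timeout seconds unless the unit is being destroyed.
 */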
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

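/*
 * GEOM dumpconf method: export the unit's access mode, timeout, info
 * string and queue statistics as XML for the GEOM configuration tree.
 */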
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit, NULL);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

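/*
 * Create a new unit: validate the parameters from userland, allocate
 * a softc and a unit number, create the GEOM geom and provider, and
 * arm the watchdog callout if a timeout was requested.
 */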
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	/*
	 * Reject a non-positive sector size here; a zero sector size
	 * would cause a division by zero in the check below.
	 */
	if (ggio->gctl_sectorsize <= 0 || !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0) {
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&sc->sc_queue_mtx);
		free(sc, M_GATE);
		return (error);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_provider->name) != 0)
			continue;
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&sc->sc_queue_mtx);
		free(sc, M_GATE);
		return (EEXIST);
	}
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	ggio->gctl_unit = sc->sc_unit;

	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", name);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
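
/*
 * Handle ioctl(2) requests on the control device.  This is the whole
 * userland interface: the ggate utilities create and destroy units
 * and service the I/O queues through these commands.
 */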
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
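	/* Create a new ggate unit and its GEOM provider. */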
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.  There are certainly pending
		 * events, because we have just created a new provider and
		 * other classes want to taste it, but we cannot wait for
		 * them here: their I/O requests will not be answered until
		 * this process returns to userland and starts serving the
		 * queue.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
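	/* Destroy an existing unit, forcibly even while open if requested. */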
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
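	/*
	 * Move requests that userland has started but not completed back
	 * to the incoming queue, so they will be handed out again.
	 */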
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto the
				 * incoming queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
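	/*
	 * Hand the next queued request to userland, sleeping until one
	 * arrives; for writes the data is copied out along with it.
	 */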
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
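	/*
	 * Complete a request previously handed out: copy in the data for
	 * reads and deliver the bio, or requeue it if userland returned
	 * EAGAIN.
	 */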
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

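/*
 * Create the control device (/dev/ggctl) through which userland talks
 * to this class.
 */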
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

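/*
 * Module event handler: set up the units table and control device on
 * load; refuse to unload while any unit still exists.
 */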
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);