/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

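/*
 * Destroy a ggate device.  Called with the topology and units locks held;
 * the units lock is dropped here.  Unless force is set, an open provider
 * makes the call fail with EBUSY.  Queued requests are completed with
 * ENXIO and we sleep until the last reference to the softc is gone.
 */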
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

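/*
 * Access method: deny new openings once the device is marked for
 * destruction and refuse read access to write-only devices.
 */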
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

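/*
 * Hand a request to the userland daemon: tag it with a sequence number,
 * append it to the incoming queue and wake up the daemon sleeping in
 * G_GATE_CMD_START.  Fails with ENOMEM when the queue limit is exceeded
 * and with ENXIO when the device is going away.
 */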
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

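/*
 * Completion callback for reads sent directly to the read provider.  On
 * success the parent request is completed right away; on error it is
 * re-queued for the userland daemon instead.
 */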
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}

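/*
 * Start method.  Reads go straight to the read provider when one is
 * configured, write-like requests are rejected on read-only devices and
 * unsupported commands fail with EOPNOTSUPP; everything else is queued
 * for the userland daemon.
 */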
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

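/*
 * Look up a device by unit number or, with G_GATE_NAME_GIVEN, by provider
 * name, and acquire a reference on it.  Returns NULL if no such device
 * exists.
 */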
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

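/*
 * Drop a reference obtained with g_gate_hold() and wake up a thread
 * waiting in g_gate_destroy() for the last reference to go away.
 */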
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

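/*
 * Return the requested unit number if it is free, or the lowest free unit
 * when a negative number was passed.  On failure -1 is returned and
 * *errorp is set to EINVAL, EEXIST or ENFILE.
 */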
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

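/*
 * Callout handler armed when a timeout was requested at creation time:
 * requests that have been sitting in the queues for several seconds are
 * failed with EIO and the callout is rescheduled until the device goes
 * away.
 */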
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

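/*
 * Orphan method: the read provider went away, so drop our access count
 * and get rid of the read consumer.
 */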
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

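/*
 * Export device attributes (access mode, read provider and offset,
 * timeout, queue counters, reference count, unit) into the GEOM
 * configuration XML.
 */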
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

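/*
 * Handle G_GATE_CMD_CREATE: validate the parameters passed from userland,
 * reserve a unit number, optionally open the configured read provider and
 * create the ggate geom and its provider.
 */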
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}

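/*
 * Handle G_GATE_CMD_MODIFY: update the info string, replace or remove the
 * read provider and/or change the read offset of an existing device.
 * Media size changes are not implemented yet.
 */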
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		/* TODO */
		return (EOPNOTSUPP);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		if (sc->sc_readcons != NULL) {
			cp = sc->sc_readcons;
			sc->sc_readcons = NULL;
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
			/*
			 * Forget the stale pointer, so that an empty
			 * gctl_readprov leaves the device without a read
			 * provider instead of pointing at the destroyed
			 * consumer.
			 */
			cp = NULL;
		}
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			return (EINVAL);
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			return (EINVAL);
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
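
/*
 * Control device ioctl handler: the interface through which the userland
 * ggate tools create, modify, cancel and destroy devices and service I/O
 * (G_GATE_CMD_START hands requests to userland, G_GATE_CMD_DONE returns
 * the results).
 */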
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

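/*
 * Create the ggate control device node that implements the ioctl
 * interface.
 */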
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

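/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while devices exist, otherwise tear
 * everything down again.
 */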
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);