/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

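/*
 * Drop our read access to the consumer's provider, then detach and
 * destroy the consumer.  Called with the topology lock held, either
 * directly or via the GEOM event queue.
 */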
static void
g_gate_detach(void *arg, int flags __unused)
{
	struct g_consumer *cp = arg;

	g_topology_assert();
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

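/*
 * Destroy a gate device: mark the softc as dying, wither the provider,
 * fail all queued requests with ENXIO, wait for outstanding references
 * to drain, then free the unit and the softc.  Called with the topology
 * lock and g_gate_units_lock held; g_gate_units_lock is dropped here.
 */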
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

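/*
 * Access method: refuse new openers once the device is marked for
 * destruction, and enforce the write-only flag.
 */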
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

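/*
 * Queue a bio for the userland daemon: tag it with the next sequence
 * number, put it on the incoming queue and wake up any thread sleeping
 * in G_GATE_CMD_START.  Fails with ENOMEM when the queue is full and
 * with ENXIO when the device is going away.
 */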
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

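/*
 * Completion callback for reads sent directly to the read provider.
 * On success the parent bio is completed here; on failure it is passed
 * to the userland daemon instead.  The last in-flight read schedules
 * destruction of a consumer that has been replaced or orphaned.
 */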
static void
g_gate_done(struct bio *cbp)
{
	struct g_gate_softc *sc;
	struct bio *pbp;
	struct g_consumer *cp;

	cp = cbp->bio_from;
	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If a direct read failed, pass it to the userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}

	sc = cp->geom->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (--cp->index == 0 && sc->sc_readcons != cp)
		g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
	mtx_unlock(&sc->sc_read_mtx);
}

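/*
 * Start method: reads go directly to the read provider when one is
 * configured; writes, deletes, flushes, speedups and reads without a
 * read provider are queued for the userland daemon.  Writes to a
 * read-only device fail with EPERM.
 */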
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons == NULL)
			break;
		cbp = g_clone_bio(pbp);
		if (cbp == NULL) {
			g_io_deliver(pbp, ENOMEM);
			return;
		}
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) == NULL) {
			mtx_unlock(&sc->sc_read_mtx);
			g_destroy_bio(cbp);
			pbp->bio_children--;
			break;
		}
		cp->index++;
		cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
		mtx_unlock(&sc->sc_read_mtx);
		cbp->bio_done = g_gate_done;
		g_io_request(cbp, cp);
		return;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_SPEEDUP:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

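/*
 * Look up a softc by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.
 */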
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

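/*
 * Drop a reference; the last one wakes up g_gate_destroy() when the
 * device is being destroyed.
 */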
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

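/*
 * Return a free unit number: the requested one if it is available
 * (EINVAL if out of range, EEXIST if taken), or the lowest free one
 * when the caller asked for any unit (ENFILE when none are left).
 */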
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

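/*
 * Watchdog callout: fail with EIO every request that has been queued
 * for five seconds or more, then rearm itself while the device lives.
 */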
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

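/*
 * Orphan method for the read consumer: forget it, and destroy it once
 * the last in-flight read on it has completed.
 */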
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	int done;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (sc->sc_readcons == cp)
		sc->sc_readcons = NULL;
	done = (cp->index == 0);
	mtx_unlock(&sc->sc_read_mtx);
	if (done)
		g_gate_detach(cp, 0);
}

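/*
 * Dump device state (access mode, read provider and offset, timeout,
 * info string, queue counters) into the GEOM configuration XML.
 */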
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

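/*
 * Handle G_GATE_CMD_CREATE: validate the request, allocate a unit and
 * a softc, optionally attach a read consumer, and create the provider.
 */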
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX + 1];
	char readprov[NAME_MAX + 1];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	memset(sc->sc_info, 0, sizeof(sc->sc_info));
	strncpy(sc->sc_info, ggio->gctl_info,
	    MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN) {
		memset(name, 0, sizeof(name));
		strncpy(name, ggio->gctl_name,
		    MIN(sizeof(name) - 1, sizeof(ggio->gctl_name)));
	} else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	/*
	 * Assign the local stack buffer 'name' here only temporarily;
	 * the real provider name is assigned below, once the provider
	 * has been created.
	 */
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		memset(readprov, 0, sizeof(readprov));
		strncpy(readprov, ggio->gctl_readprov,
		    MIN(sizeof(readprov) - 1, sizeof(ggio->gctl_readprov)));
		ropp = g_provider_by_name(readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.", readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	free(sc, M_GATE);
	return (error);
}

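/*
 * Handle G_GATE_CMD_MODIFY: resize the provider, update the info
 * string, and/or replace the read provider and read offset.  Note that
 * a media size change takes effect alone and returns early.
 */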
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	char readprov[NAME_MAX + 1];
	struct g_provider *pp;
	struct g_consumer *cp;
	int done, error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		g_resize_provider(pp, ggio->gctl_mediasize);
		return (0);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0) {
		memset(sc->sc_info, 0, sizeof(sc->sc_info));
		strncpy(sc->sc_info, ggio->gctl_info,
		    MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
	}
	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) != NULL) {
			sc->sc_readcons = NULL;
			done = (cp->index == 0);
			mtx_unlock(&sc->sc_read_mtx);
			if (done)
				g_gate_detach(cp, 0);
		} else
			mtx_unlock(&sc->sc_read_mtx);
		/*
		 * The old consumer is gone, or will be destroyed by
		 * g_gate_done() once its last in-flight read completes;
		 * do not let the stale pointer be reinstated as
		 * sc_readcons below.
		 */
		cp = NULL;
		if (ggio->gctl_readprov[0] != '\0') {
			memset(readprov, 0, sizeof(readprov));
			strncpy(readprov, ggio->gctl_readprov,
			    MIN(sizeof(readprov) - 1,
			    sizeof(ggio->gctl_readprov)));
			pp = g_provider_by_name(readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			error = EINVAL;
			goto fail;
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail;
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail;
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
fail:
	/*
	 * Undo a read provider change: drop the consumer attached above
	 * and the topology lock taken with it.  The old read provider,
	 * if any, has already been detached.
	 */
	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		if (cp != NULL)
			g_gate_detach(cp, 0);
		g_topology_unlock();
	}
	return (error);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
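/*
 * Control device ioctl handler: create, modify and destroy devices,
 * and implement the daemon side of the protocol (START fetches the
 * next request, DONE completes one, CANCEL aborts queued requests).
 */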
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.  There are pending events for
		 * sure, because we just created a new provider and other
		 * classes want to taste it.  However, we cannot wait for
		 * those events here, since it is this process that will
		 * answer their I/O requests once it returns to userland.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			error = msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0);
			if (error != 0)
				goto start_end;
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
		case BIO_SPEEDUP:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
				case BIO_SPEEDUP:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

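/*
 * Create the control device node (named G_GATE_CTL_NAME).
 */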
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

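/*
 * Module event handler: allocate the unit table and create the control
 * device on load, free them on unload; unloading is refused with EBUSY
 * while any device exists.
 */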
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);