/*-
 * Copyright (c) 2004 Lukas Ertl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2573679edcSLukas Ertl */ 2673679edcSLukas Ertl 2773679edcSLukas Ertl #include <sys/cdefs.h> 2873679edcSLukas Ertl __FBSDID("$FreeBSD$"); 2973679edcSLukas Ertl 3073679edcSLukas Ertl #include <sys/param.h> 3173679edcSLukas Ertl #include <sys/bio.h> 3273679edcSLukas Ertl #include <sys/conf.h> 3373679edcSLukas Ertl #include <sys/kernel.h> 3467e3ab6eSLukas Ertl #include <sys/kthread.h> 3573679edcSLukas Ertl #include <sys/libkern.h> 3673679edcSLukas Ertl #include <sys/lock.h> 3773679edcSLukas Ertl #include <sys/malloc.h> 3873679edcSLukas Ertl #include <sys/module.h> 3973679edcSLukas Ertl #include <sys/mutex.h> 4073679edcSLukas Ertl #include <sys/systm.h> 4173679edcSLukas Ertl 4273679edcSLukas Ertl #include <geom/geom.h> 4373679edcSLukas Ertl #include <geom/vinum/geom_vinum_var.h> 4473679edcSLukas Ertl #include <geom/vinum/geom_vinum.h> 4573679edcSLukas Ertl 4667e3ab6eSLukas Ertl static void gv_vol_completed_request(struct gv_volume *, struct bio *); 4767e3ab6eSLukas Ertl static void gv_vol_normal_request(struct gv_volume *, struct bio *); 4867e3ab6eSLukas Ertl 4973679edcSLukas Ertl static void 5073679edcSLukas Ertl gv_volume_orphan(struct g_consumer *cp) 5173679edcSLukas Ertl { 5273679edcSLukas Ertl struct g_geom *gp; 5399b536d8SLukas Ertl struct gv_volume *v; 5473679edcSLukas Ertl int error; 5573679edcSLukas Ertl 5673679edcSLukas Ertl g_topology_assert(); 5773679edcSLukas Ertl gp = cp->geom; 5873679edcSLukas Ertl g_trace(G_T_TOPOLOGY, "gv_volume_orphan(%s)", gp->name); 5973679edcSLukas Ertl if (cp->acr != 0 || cp->acw != 0 || cp->ace != 0) 6073679edcSLukas Ertl g_access(cp, -cp->acr, -cp->acw, -cp->ace); 6173679edcSLukas Ertl error = cp->provider->error; 6273679edcSLukas Ertl if (error == 0) 6373679edcSLukas Ertl error = ENXIO; 6473679edcSLukas Ertl g_detach(cp); 6573679edcSLukas Ertl g_destroy_consumer(cp); 6673679edcSLukas Ertl if (!LIST_EMPTY(&gp->consumer)) 6773679edcSLukas Ertl return; 6899b536d8SLukas Ertl v = gp->softc; 6967e3ab6eSLukas Ertl if (v != NULL) { 
7067e3ab6eSLukas Ertl gv_kill_vol_thread(v); 7199b536d8SLukas Ertl v->geom = NULL; 7267e3ab6eSLukas Ertl } 7399b536d8SLukas Ertl gp->softc = NULL; 7473679edcSLukas Ertl g_wither_geom(gp, error); 7573679edcSLukas Ertl } 7673679edcSLukas Ertl 7773679edcSLukas Ertl /* We end up here after the requests to our plexes are done. */ 7873679edcSLukas Ertl static void 7973679edcSLukas Ertl gv_volume_done(struct bio *bp) 8073679edcSLukas Ertl { 8167e3ab6eSLukas Ertl struct gv_volume *v; 8267e3ab6eSLukas Ertl struct gv_bioq *bq; 8373679edcSLukas Ertl 8467e3ab6eSLukas Ertl v = bp->bio_from->geom->softc; 8567e3ab6eSLukas Ertl bp->bio_cflags |= GV_BIO_DONE; 8667e3ab6eSLukas Ertl bq = g_malloc(sizeof(*bq), M_NOWAIT | M_ZERO); 8767e3ab6eSLukas Ertl bq->bp = bp; 8867e3ab6eSLukas Ertl mtx_lock(&v->bqueue_mtx); 8967e3ab6eSLukas Ertl TAILQ_INSERT_TAIL(&v->bqueue, bq, queue); 9067e3ab6eSLukas Ertl wakeup(v); 9167e3ab6eSLukas Ertl mtx_unlock(&v->bqueue_mtx); 9273679edcSLukas Ertl } 9373679edcSLukas Ertl 9473679edcSLukas Ertl static void 9573679edcSLukas Ertl gv_volume_start(struct bio *bp) 9673679edcSLukas Ertl { 9773679edcSLukas Ertl struct gv_volume *v; 9867e3ab6eSLukas Ertl struct gv_bioq *bq; 9973679edcSLukas Ertl 10073679edcSLukas Ertl switch(bp->bio_cmd) { 10173679edcSLukas Ertl case BIO_READ: 10273679edcSLukas Ertl case BIO_WRITE: 10373679edcSLukas Ertl case BIO_DELETE: 10467e3ab6eSLukas Ertl break; 10567e3ab6eSLukas Ertl case BIO_GETATTR: 10673679edcSLukas Ertl default: 10773679edcSLukas Ertl g_io_deliver(bp, EOPNOTSUPP); 10873679edcSLukas Ertl return; 10973679edcSLukas Ertl } 11067e3ab6eSLukas Ertl 11167e3ab6eSLukas Ertl v = bp->bio_to->geom->softc; 11267e3ab6eSLukas Ertl if (v->state != GV_VOL_UP) { 11367e3ab6eSLukas Ertl g_io_deliver(bp, ENXIO); 11467e3ab6eSLukas Ertl return; 11567e3ab6eSLukas Ertl } 11667e3ab6eSLukas Ertl 11767e3ab6eSLukas Ertl bq = g_malloc(sizeof(*bq), M_NOWAIT | M_ZERO); 11867e3ab6eSLukas Ertl bq->bp = bp; 11967e3ab6eSLukas Ertl mtx_lock(&v->bqueue_mtx); 
12067e3ab6eSLukas Ertl TAILQ_INSERT_TAIL(&v->bqueue, bq, queue); 12167e3ab6eSLukas Ertl wakeup(v); 12267e3ab6eSLukas Ertl mtx_unlock(&v->bqueue_mtx); 12367e3ab6eSLukas Ertl } 12467e3ab6eSLukas Ertl 12567e3ab6eSLukas Ertl static void 12667e3ab6eSLukas Ertl gv_vol_worker(void *arg) 12767e3ab6eSLukas Ertl { 12867e3ab6eSLukas Ertl struct bio *bp; 12967e3ab6eSLukas Ertl struct gv_volume *v; 13067e3ab6eSLukas Ertl struct gv_bioq *bq; 13167e3ab6eSLukas Ertl 13267e3ab6eSLukas Ertl v = arg; 13367e3ab6eSLukas Ertl KASSERT(v != NULL, ("NULL v")); 13467e3ab6eSLukas Ertl mtx_lock(&v->bqueue_mtx); 13567e3ab6eSLukas Ertl for (;;) { 13667e3ab6eSLukas Ertl /* We were signaled to exit. */ 13767e3ab6eSLukas Ertl if (v->flags & GV_VOL_THREAD_DIE) 13867e3ab6eSLukas Ertl break; 13967e3ab6eSLukas Ertl 14067e3ab6eSLukas Ertl /* Take the first BIO from our queue. */ 14167e3ab6eSLukas Ertl bq = TAILQ_FIRST(&v->bqueue); 14267e3ab6eSLukas Ertl if (bq == NULL) { 14367e3ab6eSLukas Ertl msleep(v, &v->bqueue_mtx, PRIBIO, "-", hz/10); 14467e3ab6eSLukas Ertl continue; 14567e3ab6eSLukas Ertl } 14667e3ab6eSLukas Ertl TAILQ_REMOVE(&v->bqueue, bq, queue); 14767e3ab6eSLukas Ertl mtx_unlock(&v->bqueue_mtx); 14867e3ab6eSLukas Ertl 14967e3ab6eSLukas Ertl bp = bq->bp; 15067e3ab6eSLukas Ertl g_free(bq); 15167e3ab6eSLukas Ertl 15267e3ab6eSLukas Ertl if (bp->bio_cflags & GV_BIO_DONE) 15367e3ab6eSLukas Ertl gv_vol_completed_request(v, bp); 15467e3ab6eSLukas Ertl else 15567e3ab6eSLukas Ertl gv_vol_normal_request(v, bp); 15667e3ab6eSLukas Ertl 15767e3ab6eSLukas Ertl mtx_lock(&v->bqueue_mtx); 15867e3ab6eSLukas Ertl } 15967e3ab6eSLukas Ertl mtx_unlock(&v->bqueue_mtx); 16067e3ab6eSLukas Ertl v->flags |= GV_VOL_THREAD_DEAD; 16167e3ab6eSLukas Ertl wakeup(v); 16267e3ab6eSLukas Ertl 16367e3ab6eSLukas Ertl kthread_exit(ENXIO); 16467e3ab6eSLukas Ertl } 16567e3ab6eSLukas Ertl 16667e3ab6eSLukas Ertl static void 16767e3ab6eSLukas Ertl gv_vol_completed_request(struct gv_volume *v, struct bio *bp) 16867e3ab6eSLukas Ertl { 
16967e3ab6eSLukas Ertl struct bio *pbp; 170d8688e11SLukas Ertl struct g_geom *gp; 171d8688e11SLukas Ertl struct g_consumer *cp, *cp2; 17267e3ab6eSLukas Ertl struct gv_bioq *bq; 17367e3ab6eSLukas Ertl 17467e3ab6eSLukas Ertl pbp = bp->bio_parent; 17567e3ab6eSLukas Ertl 17667e3ab6eSLukas Ertl if (pbp->bio_error == 0) 17767e3ab6eSLukas Ertl pbp->bio_error = bp->bio_error; 17867e3ab6eSLukas Ertl 17967e3ab6eSLukas Ertl switch (pbp->bio_cmd) { 18067e3ab6eSLukas Ertl case BIO_READ: 181d8688e11SLukas Ertl if (bp->bio_error == 0) 182d8688e11SLukas Ertl break; 183d8688e11SLukas Ertl 184d8688e11SLukas Ertl if (pbp->bio_cflags & GV_BIO_RETRY) 185d8688e11SLukas Ertl break; 186d8688e11SLukas Ertl 187d8688e11SLukas Ertl /* Check if we have another plex left. */ 188d8688e11SLukas Ertl cp = bp->bio_from; 189d8688e11SLukas Ertl gp = cp->geom; 190d8688e11SLukas Ertl cp2 = LIST_NEXT(cp, consumer); 191d8688e11SLukas Ertl if (cp2 == NULL) 192d8688e11SLukas Ertl break; 193d8688e11SLukas Ertl 194d8688e11SLukas Ertl if (LIST_NEXT(cp2, consumer) == NULL) 195d8688e11SLukas Ertl pbp->bio_cflags |= GV_BIO_RETRY; 196d8688e11SLukas Ertl 19767e3ab6eSLukas Ertl g_destroy_bio(bp); 19867e3ab6eSLukas Ertl pbp->bio_children--; 19967e3ab6eSLukas Ertl bq = g_malloc(sizeof(*bq), M_WAITOK | M_ZERO); 20067e3ab6eSLukas Ertl bq->bp = pbp; 20167e3ab6eSLukas Ertl mtx_lock(&v->bqueue_mtx); 20267e3ab6eSLukas Ertl TAILQ_INSERT_TAIL(&v->bqueue, bq, queue); 20367e3ab6eSLukas Ertl mtx_unlock(&v->bqueue_mtx); 20467e3ab6eSLukas Ertl return; 205d8688e11SLukas Ertl 20667e3ab6eSLukas Ertl case BIO_WRITE: 20767e3ab6eSLukas Ertl case BIO_DELETE: 208d8688e11SLukas Ertl /* Remember if this write request succeeded. */ 209d8688e11SLukas Ertl if (bp->bio_error == 0) 210d8688e11SLukas Ertl pbp->bio_cflags |= GV_BIO_SUCCEED; 21167e3ab6eSLukas Ertl break; 21267e3ab6eSLukas Ertl } 21367e3ab6eSLukas Ertl 21467e3ab6eSLukas Ertl /* When the original request is finished, we deliver it. 
*/ 21567e3ab6eSLukas Ertl pbp->bio_inbed++; 21667e3ab6eSLukas Ertl if (pbp->bio_inbed == pbp->bio_children) { 217d8688e11SLukas Ertl if (pbp->bio_cflags & GV_BIO_SUCCEED) 218d8688e11SLukas Ertl pbp->bio_error = 0; 21967e3ab6eSLukas Ertl pbp->bio_completed = bp->bio_length; 22067e3ab6eSLukas Ertl g_io_deliver(pbp, pbp->bio_error); 22167e3ab6eSLukas Ertl } 22267e3ab6eSLukas Ertl 22367e3ab6eSLukas Ertl g_destroy_bio(bp); 22467e3ab6eSLukas Ertl } 22567e3ab6eSLukas Ertl 22667e3ab6eSLukas Ertl static void 22767e3ab6eSLukas Ertl gv_vol_normal_request(struct gv_volume *v, struct bio *bp) 22867e3ab6eSLukas Ertl { 2297ad68986SLukas Ertl struct bio_queue_head queue; 23067e3ab6eSLukas Ertl struct g_geom *gp; 2317ad68986SLukas Ertl struct gv_plex *p, *lp; 2327ad68986SLukas Ertl struct bio *cbp; 23367e3ab6eSLukas Ertl 23467e3ab6eSLukas Ertl gp = v->geom; 23567e3ab6eSLukas Ertl 23667e3ab6eSLukas Ertl switch (bp->bio_cmd) { 23767e3ab6eSLukas Ertl case BIO_READ: 23867e3ab6eSLukas Ertl cbp = g_clone_bio(bp); 23967e3ab6eSLukas Ertl if (cbp == NULL) { 24067e3ab6eSLukas Ertl g_io_deliver(bp, ENOMEM); 24167e3ab6eSLukas Ertl return; 24267e3ab6eSLukas Ertl } 24367e3ab6eSLukas Ertl cbp->bio_done = gv_volume_done; 244d8688e11SLukas Ertl /* 245d8688e11SLukas Ertl * Try to find a good plex where we can send the request to. 246d8688e11SLukas Ertl * The plex either has to be up, or it's a degraded RAID5 plex. 
247d8688e11SLukas Ertl */ 2487ad68986SLukas Ertl lp = v->last_read_plex; 2497ad68986SLukas Ertl if (lp == NULL) 2507ad68986SLukas Ertl lp = LIST_FIRST(&v->plexes); 2517ad68986SLukas Ertl p = LIST_NEXT(lp, in_volume); 2527ad68986SLukas Ertl do { 2537ad68986SLukas Ertl if (p == NULL) 2547ad68986SLukas Ertl p = LIST_FIRST(&v->plexes); 255d8688e11SLukas Ertl if ((p->state > GV_PLEX_DEGRADED) || 256d8688e11SLukas Ertl (p->state >= GV_PLEX_DEGRADED && 257d8688e11SLukas Ertl p->org == GV_PLEX_RAID5)) 25867e3ab6eSLukas Ertl break; 2597ad68986SLukas Ertl p = LIST_NEXT(p, in_volume); 2607ad68986SLukas Ertl } while (p != lp); 2617ad68986SLukas Ertl 2627ad68986SLukas Ertl if (p == NULL || 2637ad68986SLukas Ertl (p->org == GV_PLEX_RAID5 && p->state < GV_PLEX_DEGRADED) || 2647ad68986SLukas Ertl (p->state <= GV_PLEX_DEGRADED)) { 265d8688e11SLukas Ertl g_destroy_bio(cbp); 266d8688e11SLukas Ertl bp->bio_children--; 267d8688e11SLukas Ertl g_io_deliver(bp, ENXIO); 268d8688e11SLukas Ertl return; 269d8688e11SLukas Ertl } 27067e3ab6eSLukas Ertl g_io_request(cbp, p->consumer); 2717ad68986SLukas Ertl v->last_read_plex = p; 27267e3ab6eSLukas Ertl 27367e3ab6eSLukas Ertl break; 27467e3ab6eSLukas Ertl 27567e3ab6eSLukas Ertl case BIO_WRITE: 27667e3ab6eSLukas Ertl case BIO_DELETE: 2777ad68986SLukas Ertl bioq_init(&queue); 27867e3ab6eSLukas Ertl LIST_FOREACH(p, &v->plexes, in_volume) { 27967e3ab6eSLukas Ertl if (p->state < GV_PLEX_DEGRADED) 28067e3ab6eSLukas Ertl continue; 28167e3ab6eSLukas Ertl cbp = g_clone_bio(bp); 2827ad68986SLukas Ertl if (cbp == NULL) { 2837ad68986SLukas Ertl for (cbp = bioq_first(&queue); cbp != NULL; 2847ad68986SLukas Ertl cbp = bioq_first(&queue)) { 2857ad68986SLukas Ertl bioq_remove(&queue, cbp); 2867ad68986SLukas Ertl g_destroy_bio(cbp); 2877ad68986SLukas Ertl } 2887ad68986SLukas Ertl if (bp->bio_error == 0) 2897ad68986SLukas Ertl bp->bio_error = ENOMEM; 2907ad68986SLukas Ertl g_io_deliver(bp, bp->bio_error); 2917ad68986SLukas Ertl return; 2927ad68986SLukas Ertl } 
2937ad68986SLukas Ertl bioq_insert_tail(&queue, cbp); 29467e3ab6eSLukas Ertl cbp->bio_done = gv_volume_done; 2957ad68986SLukas Ertl cbp->bio_caller1 = p->consumer; 29667e3ab6eSLukas Ertl } 29767e3ab6eSLukas Ertl /* Fire off all sub-requests. */ 2987ad68986SLukas Ertl for (cbp = bioq_first(&queue); cbp != NULL; 2997ad68986SLukas Ertl cbp = bioq_first(&queue)) { 3007ad68986SLukas Ertl bioq_remove(&queue, cbp); 3017ad68986SLukas Ertl g_io_request(cbp, cbp->bio_caller1); 30267e3ab6eSLukas Ertl } 30367e3ab6eSLukas Ertl break; 30467e3ab6eSLukas Ertl } 30573679edcSLukas Ertl } 30673679edcSLukas Ertl 30773679edcSLukas Ertl static int 30873679edcSLukas Ertl gv_volume_access(struct g_provider *pp, int dr, int dw, int de) 30973679edcSLukas Ertl { 31073679edcSLukas Ertl struct g_geom *gp; 31173679edcSLukas Ertl struct g_consumer *cp, *cp2; 31273679edcSLukas Ertl int error; 31373679edcSLukas Ertl 31473679edcSLukas Ertl gp = pp->geom; 31573679edcSLukas Ertl 31673679edcSLukas Ertl error = ENXIO; 31773679edcSLukas Ertl LIST_FOREACH(cp, &gp->consumer, consumer) { 31873679edcSLukas Ertl error = g_access(cp, dr, dw, de); 31973679edcSLukas Ertl if (error) { 32073679edcSLukas Ertl LIST_FOREACH(cp2, &gp->consumer, consumer) { 32173679edcSLukas Ertl if (cp == cp2) 32273679edcSLukas Ertl break; 32373679edcSLukas Ertl g_access(cp2, -dr, -dw, -de); 32473679edcSLukas Ertl } 32573679edcSLukas Ertl return (error); 32673679edcSLukas Ertl } 32773679edcSLukas Ertl } 32873679edcSLukas Ertl return (error); 32973679edcSLukas Ertl } 33073679edcSLukas Ertl 33173679edcSLukas Ertl static struct g_geom * 33273679edcSLukas Ertl gv_volume_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 33373679edcSLukas Ertl { 33473679edcSLukas Ertl struct g_geom *gp; 33573679edcSLukas Ertl struct g_provider *pp2; 3364328802cSLukas Ertl struct g_consumer *cp, *ocp; 33773679edcSLukas Ertl struct gv_softc *sc; 33873679edcSLukas Ertl struct gv_volume *v; 33973679edcSLukas Ertl struct gv_plex *p; 
3404328802cSLukas Ertl int error, first; 34173679edcSLukas Ertl 34273679edcSLukas Ertl g_trace(G_T_TOPOLOGY, "gv_volume_taste(%s, %s)", mp->name, pp->name); 34373679edcSLukas Ertl g_topology_assert(); 34473679edcSLukas Ertl 34573679edcSLukas Ertl /* First, find the VINUM class and its associated geom. */ 34673679edcSLukas Ertl gp = find_vinum_geom(); 34773679edcSLukas Ertl if (gp == NULL) 34873679edcSLukas Ertl return (NULL); 34973679edcSLukas Ertl 35073679edcSLukas Ertl sc = gp->softc; 35173679edcSLukas Ertl KASSERT(sc != NULL, ("gv_volume_taste: NULL sc")); 35273679edcSLukas Ertl 35373679edcSLukas Ertl gp = pp->geom; 35473679edcSLukas Ertl 35573679edcSLukas Ertl /* We only want to attach to plexes. */ 35673679edcSLukas Ertl if (strcmp(gp->class->name, "VINUMPLEX")) 35773679edcSLukas Ertl return (NULL); 35873679edcSLukas Ertl 35973679edcSLukas Ertl first = 0; 36073679edcSLukas Ertl p = gp->softc; 361f11c507cSLukas Ertl 362f11c507cSLukas Ertl /* Let's see if the volume this plex wants is already configured. */ 36373679edcSLukas Ertl v = gv_find_vol(sc, p->volume); 36473679edcSLukas Ertl if (v == NULL) 36573679edcSLukas Ertl return (NULL); 36673679edcSLukas Ertl if (v->geom == NULL) { 36773679edcSLukas Ertl gp = g_new_geomf(mp, "%s", p->volume); 36873679edcSLukas Ertl gp->start = gv_volume_start; 36973679edcSLukas Ertl gp->orphan = gv_volume_orphan; 37073679edcSLukas Ertl gp->access = gv_volume_access; 37173679edcSLukas Ertl gp->softc = v; 37273679edcSLukas Ertl first++; 37367e3ab6eSLukas Ertl TAILQ_INIT(&v->bqueue); 374f11c507cSLukas Ertl } else 375f11c507cSLukas Ertl gp = v->geom; 376f11c507cSLukas Ertl 377f11c507cSLukas Ertl /* Create bio queue mutex and worker thread, if necessary. 
*/ 378f11c507cSLukas Ertl if (mtx_initialized(&v->bqueue_mtx) == 0) 37967e3ab6eSLukas Ertl mtx_init(&v->bqueue_mtx, "gv_plex", NULL, MTX_DEF); 380f11c507cSLukas Ertl 381f11c507cSLukas Ertl if (!(v->flags & GV_VOL_THREAD_ACTIVE)) { 38267e3ab6eSLukas Ertl kthread_create(gv_vol_worker, v, NULL, 0, 0, "gv_v %s", 38367e3ab6eSLukas Ertl v->name); 38467e3ab6eSLukas Ertl v->flags |= GV_VOL_THREAD_ACTIVE; 385f11c507cSLukas Ertl } 38673679edcSLukas Ertl 3874328802cSLukas Ertl /* 3884328802cSLukas Ertl * Create a new consumer and attach it to the plex geom. Since this 3894328802cSLukas Ertl * volume might already have a plex attached, we need to adjust the 3904328802cSLukas Ertl * access counts of the new consumer. 3914328802cSLukas Ertl */ 3924328802cSLukas Ertl ocp = LIST_FIRST(&gp->consumer); 39373679edcSLukas Ertl cp = g_new_consumer(gp); 39473679edcSLukas Ertl g_attach(cp, pp); 3954328802cSLukas Ertl if ((ocp != NULL) && (ocp->acr > 0 || ocp->acw > 0 || ocp->ace > 0)) { 3964328802cSLukas Ertl error = g_access(cp, ocp->acr, ocp->acw, ocp->ace); 3974328802cSLukas Ertl if (error) { 3984328802cSLukas Ertl printf("GEOM_VINUM: failed g_access %s -> %s; " 3994328802cSLukas Ertl "errno %d\n", v->name, p->name, error); 4004328802cSLukas Ertl g_detach(cp); 4014328802cSLukas Ertl g_destroy_consumer(cp); 4024328802cSLukas Ertl if (first) 4034328802cSLukas Ertl g_destroy_geom(gp); 4044328802cSLukas Ertl return (NULL); 4054328802cSLukas Ertl } 4064328802cSLukas Ertl } 4074328802cSLukas Ertl 40873679edcSLukas Ertl p->consumer = cp; 40973679edcSLukas Ertl 41073679edcSLukas Ertl if (p->vol_sc != v) { 41173679edcSLukas Ertl p->vol_sc = v; 41273679edcSLukas Ertl v->plexcount++; 41373679edcSLukas Ertl LIST_INSERT_HEAD(&v->plexes, p, in_volume); 41473679edcSLukas Ertl } 41573679edcSLukas Ertl 41673679edcSLukas Ertl /* We need to setup a new VINUMVOLUME geom. 
*/ 41773679edcSLukas Ertl if (first) { 41873679edcSLukas Ertl pp2 = g_new_providerf(gp, "gvinum/%s", v->name); 41973679edcSLukas Ertl pp2->mediasize = pp->mediasize; 42073679edcSLukas Ertl pp2->sectorsize = pp->sectorsize; 42173679edcSLukas Ertl g_error_provider(pp2, 0); 42273679edcSLukas Ertl v->size = pp2->mediasize; 42373679edcSLukas Ertl v->geom = gp; 42473679edcSLukas Ertl return (gp); 42573679edcSLukas Ertl } 42673679edcSLukas Ertl 42773679edcSLukas Ertl return (NULL); 42873679edcSLukas Ertl } 42973679edcSLukas Ertl 43073679edcSLukas Ertl static int 43173679edcSLukas Ertl gv_volume_destroy_geom(struct gctl_req *req, struct g_class *mp, 43273679edcSLukas Ertl struct g_geom *gp) 43373679edcSLukas Ertl { 43467e3ab6eSLukas Ertl struct gv_volume *v; 43567e3ab6eSLukas Ertl 43673679edcSLukas Ertl g_trace(G_T_TOPOLOGY, "gv_volume_destroy_geom: %s", gp->name); 43773679edcSLukas Ertl g_topology_assert(); 4387f72de2dSLukas Ertl 43967e3ab6eSLukas Ertl v = gp->softc; 44067e3ab6eSLukas Ertl gv_kill_vol_thread(v); 44173679edcSLukas Ertl g_wither_geom(gp, ENXIO); 44273679edcSLukas Ertl return (0); 44373679edcSLukas Ertl } 44473679edcSLukas Ertl 44573679edcSLukas Ertl #define VINUMVOLUME_CLASS_NAME "VINUMVOLUME" 44673679edcSLukas Ertl 44773679edcSLukas Ertl static struct g_class g_vinum_volume_class = { 44873679edcSLukas Ertl .name = VINUMVOLUME_CLASS_NAME, 4495721c9c7SPoul-Henning Kamp .version = G_VERSION, 45073679edcSLukas Ertl .taste = gv_volume_taste, 45173679edcSLukas Ertl .destroy_geom = gv_volume_destroy_geom, 45273679edcSLukas Ertl }; 45373679edcSLukas Ertl 45473679edcSLukas Ertl DECLARE_GEOM_CLASS(g_vinum_volume_class, g_vinum_volume); 455