/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 182d1661a5SPawel Jakub Dawidek * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 192d1661a5SPawel Jakub Dawidek * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 202d1661a5SPawel Jakub Dawidek * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 212d1661a5SPawel Jakub Dawidek * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 222d1661a5SPawel Jakub Dawidek * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 232d1661a5SPawel Jakub Dawidek * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 242d1661a5SPawel Jakub Dawidek * SUCH DAMAGE. 252d1661a5SPawel Jakub Dawidek */ 262d1661a5SPawel Jakub Dawidek 272d1661a5SPawel Jakub Dawidek #include <sys/cdefs.h> 282d1661a5SPawel Jakub Dawidek __FBSDID("$FreeBSD$"); 292d1661a5SPawel Jakub Dawidek 302d1661a5SPawel Jakub Dawidek #include <sys/param.h> 312d1661a5SPawel Jakub Dawidek #include <sys/systm.h> 322d1661a5SPawel Jakub Dawidek #include <sys/kernel.h> 332d1661a5SPawel Jakub Dawidek #include <sys/module.h> 342d1661a5SPawel Jakub Dawidek #include <sys/limits.h> 352d1661a5SPawel Jakub Dawidek #include <sys/lock.h> 362d1661a5SPawel Jakub Dawidek #include <sys/mutex.h> 372d1661a5SPawel Jakub Dawidek #include <sys/bio.h> 385d807a0eSAndrey V. 
Elsukov #include <sys/sbuf.h> 392d1661a5SPawel Jakub Dawidek #include <sys/sysctl.h> 402d1661a5SPawel Jakub Dawidek #include <sys/malloc.h> 419da3072cSPawel Jakub Dawidek #include <sys/eventhandler.h> 422d1661a5SPawel Jakub Dawidek #include <vm/uma.h> 432d1661a5SPawel Jakub Dawidek #include <geom/geom.h> 442d1661a5SPawel Jakub Dawidek #include <sys/proc.h> 452d1661a5SPawel Jakub Dawidek #include <sys/kthread.h> 4663710c4dSJohn Baldwin #include <sys/sched.h> 472d1661a5SPawel Jakub Dawidek #include <geom/raid3/g_raid3.h> 482d1661a5SPawel Jakub Dawidek 49cb08c2ccSAlexander Leidinger FEATURE(geom_raid3, "GEOM RAID-3 functionality"); 502d1661a5SPawel Jakub Dawidek 515bb84bc8SRobert Watson static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data"); 522d1661a5SPawel Jakub Dawidek 532d1661a5SPawel Jakub Dawidek SYSCTL_DECL(_kern_geom); 546472ac3dSEd Schouten static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0, 556472ac3dSEd Schouten "GEOM_RAID3 stuff"); 56809a9dc6SPawel Jakub Dawidek u_int g_raid3_debug = 0; 57af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0, 582d1661a5SPawel Jakub Dawidek "Debug level"); 59e5e7825cSPawel Jakub Dawidek static u_int g_raid3_timeout = 4; 60af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout, 612d1661a5SPawel Jakub Dawidek 0, "Time to wait on all raid3 components"); 624d006a98SPawel Jakub Dawidek static u_int g_raid3_idletime = 5; 63af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN, 644d006a98SPawel Jakub Dawidek &g_raid3_idletime, 0, "Mark components as clean when idling"); 653aae74ecSPawel Jakub Dawidek static u_int g_raid3_disconnect_on_failure = 1; 66af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 673aae74ecSPawel Jakub Dawidek &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 
68e6757059SPawel Jakub Dawidek static u_int g_raid3_syncreqs = 2; 693650be51SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 703650be51SPawel Jakub Dawidek &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests."); 71ed940a82SPawel Jakub Dawidek static u_int g_raid3_use_malloc = 0; 72ed940a82SPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN, 73ed940a82SPawel Jakub Dawidek &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9)."); 742d1661a5SPawel Jakub Dawidek 752d1661a5SPawel Jakub Dawidek static u_int g_raid3_n64k = 50; 76af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0, 772d1661a5SPawel Jakub Dawidek "Maximum number of 64kB allocations"); 782d1661a5SPawel Jakub Dawidek static u_int g_raid3_n16k = 200; 79af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0, 802d1661a5SPawel Jakub Dawidek "Maximum number of 16kB allocations"); 812d1661a5SPawel Jakub Dawidek static u_int g_raid3_n4k = 1200; 82af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0, 832d1661a5SPawel Jakub Dawidek "Maximum number of 4kB allocations"); 842d1661a5SPawel Jakub Dawidek 856472ac3dSEd Schouten static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0, 862d1661a5SPawel Jakub Dawidek "GEOM_RAID3 statistics"); 87dba915cfSPawel Jakub Dawidek static u_int g_raid3_parity_mismatch = 0; 88dba915cfSPawel Jakub Dawidek SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD, 89dba915cfSPawel Jakub Dawidek &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode"); 902d1661a5SPawel Jakub Dawidek 912d1661a5SPawel Jakub Dawidek #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 922d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 932d1661a5SPawel Jakub Dawidek msleep((ident), 
(mtx), (priority), (wmesg), (timeout)); \ 942d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 952d1661a5SPawel Jakub Dawidek } while (0) 962d1661a5SPawel Jakub Dawidek 97f62c1a47SAlexander Motin static eventhandler_tag g_raid3_post_sync = NULL; 98f62c1a47SAlexander Motin static int g_raid3_shutdown = 0; 992d1661a5SPawel Jakub Dawidek 1002d1661a5SPawel Jakub Dawidek static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp, 1012d1661a5SPawel Jakub Dawidek struct g_geom *gp); 1022d1661a5SPawel Jakub Dawidek static g_taste_t g_raid3_taste; 1039da3072cSPawel Jakub Dawidek static void g_raid3_init(struct g_class *mp); 1049da3072cSPawel Jakub Dawidek static void g_raid3_fini(struct g_class *mp); 1052d1661a5SPawel Jakub Dawidek 1062d1661a5SPawel Jakub Dawidek struct g_class g_raid3_class = { 1072d1661a5SPawel Jakub Dawidek .name = G_RAID3_CLASS_NAME, 1082d1661a5SPawel Jakub Dawidek .version = G_VERSION, 1092d1661a5SPawel Jakub Dawidek .ctlreq = g_raid3_config, 1102d1661a5SPawel Jakub Dawidek .taste = g_raid3_taste, 1119da3072cSPawel Jakub Dawidek .destroy_geom = g_raid3_destroy_geom, 1129da3072cSPawel Jakub Dawidek .init = g_raid3_init, 1139da3072cSPawel Jakub Dawidek .fini = g_raid3_fini 1142d1661a5SPawel Jakub Dawidek }; 1152d1661a5SPawel Jakub Dawidek 1162d1661a5SPawel Jakub Dawidek 1172d1661a5SPawel Jakub Dawidek static void g_raid3_destroy_provider(struct g_raid3_softc *sc); 118d97d5ee9SPawel Jakub Dawidek static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state); 119d97d5ee9SPawel Jakub Dawidek static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force); 1202d1661a5SPawel Jakub Dawidek static void g_raid3_dumpconf(struct sbuf *sb, const char *indent, 1212d1661a5SPawel Jakub Dawidek struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 1222d1661a5SPawel Jakub Dawidek static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type); 1233650be51SPawel Jakub Dawidek static 
int g_raid3_register_request(struct bio *pbp); 1243650be51SPawel Jakub Dawidek static void g_raid3_sync_release(struct g_raid3_softc *sc); 1252d1661a5SPawel Jakub Dawidek 1262d1661a5SPawel Jakub Dawidek 1272d1661a5SPawel Jakub Dawidek static const char * 1282d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(int state) 1292d1661a5SPawel Jakub Dawidek { 1302d1661a5SPawel Jakub Dawidek 1312d1661a5SPawel Jakub Dawidek switch (state) { 1322d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NODISK: 1332d1661a5SPawel Jakub Dawidek return ("NODISK"); 1342d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NONE: 1352d1661a5SPawel Jakub Dawidek return ("NONE"); 1362d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 1372d1661a5SPawel Jakub Dawidek return ("NEW"); 1382d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 1392d1661a5SPawel Jakub Dawidek return ("ACTIVE"); 1402d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 1412d1661a5SPawel Jakub Dawidek return ("STALE"); 1422d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 1432d1661a5SPawel Jakub Dawidek return ("SYNCHRONIZING"); 1442d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 1452d1661a5SPawel Jakub Dawidek return ("DISCONNECTED"); 1462d1661a5SPawel Jakub Dawidek default: 1472d1661a5SPawel Jakub Dawidek return ("INVALID"); 1482d1661a5SPawel Jakub Dawidek } 1492d1661a5SPawel Jakub Dawidek } 1502d1661a5SPawel Jakub Dawidek 1512d1661a5SPawel Jakub Dawidek static const char * 1522d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(int state) 1532d1661a5SPawel Jakub Dawidek { 1542d1661a5SPawel Jakub Dawidek 1552d1661a5SPawel Jakub Dawidek switch (state) { 1562d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_STARTING: 1572d1661a5SPawel Jakub Dawidek return ("STARTING"); 1582d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_DEGRADED: 1592d1661a5SPawel Jakub Dawidek return ("DEGRADED"); 1602d1661a5SPawel Jakub Dawidek case G_RAID3_DEVICE_STATE_COMPLETE: 
1612d1661a5SPawel Jakub Dawidek return ("COMPLETE"); 1622d1661a5SPawel Jakub Dawidek default: 1632d1661a5SPawel Jakub Dawidek return ("INVALID"); 1642d1661a5SPawel Jakub Dawidek } 1652d1661a5SPawel Jakub Dawidek } 1662d1661a5SPawel Jakub Dawidek 1672d1661a5SPawel Jakub Dawidek const char * 1682d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(struct g_raid3_disk *disk) 1692d1661a5SPawel Jakub Dawidek { 1702d1661a5SPawel Jakub Dawidek 1712d1661a5SPawel Jakub Dawidek if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 1722d1661a5SPawel Jakub Dawidek return ("[unknown]"); 1732d1661a5SPawel Jakub Dawidek return (disk->d_name); 1742d1661a5SPawel Jakub Dawidek } 1752d1661a5SPawel Jakub Dawidek 176ed940a82SPawel Jakub Dawidek static void * 177ed940a82SPawel Jakub Dawidek g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags) 178ed940a82SPawel Jakub Dawidek { 179ed940a82SPawel Jakub Dawidek void *ptr; 180d4060fa6SAlexander Motin enum g_raid3_zones zone; 181ed940a82SPawel Jakub Dawidek 182d4060fa6SAlexander Motin if (g_raid3_use_malloc || 183d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 184ed940a82SPawel Jakub Dawidek ptr = malloc(size, M_RAID3, flags); 185ed940a82SPawel Jakub Dawidek else { 186d4060fa6SAlexander Motin ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone, 187d4060fa6SAlexander Motin &sc->sc_zones[zone], flags); 188d4060fa6SAlexander Motin sc->sc_zones[zone].sz_requested++; 189ed940a82SPawel Jakub Dawidek if (ptr == NULL) 190d4060fa6SAlexander Motin sc->sc_zones[zone].sz_failed++; 191ed940a82SPawel Jakub Dawidek } 192ed940a82SPawel Jakub Dawidek return (ptr); 193ed940a82SPawel Jakub Dawidek } 194ed940a82SPawel Jakub Dawidek 195ed940a82SPawel Jakub Dawidek static void 196ed940a82SPawel Jakub Dawidek g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size) 197ed940a82SPawel Jakub Dawidek { 198d4060fa6SAlexander Motin enum g_raid3_zones zone; 199ed940a82SPawel Jakub Dawidek 200d4060fa6SAlexander Motin if 
(g_raid3_use_malloc || 201d4060fa6SAlexander Motin (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 202ed940a82SPawel Jakub Dawidek free(ptr, M_RAID3); 203ed940a82SPawel Jakub Dawidek else { 204d4060fa6SAlexander Motin uma_zfree_arg(sc->sc_zones[zone].sz_zone, 205d4060fa6SAlexander Motin ptr, &sc->sc_zones[zone]); 206ed940a82SPawel Jakub Dawidek } 207ed940a82SPawel Jakub Dawidek } 208ed940a82SPawel Jakub Dawidek 2093650be51SPawel Jakub Dawidek static int 2103650be51SPawel Jakub Dawidek g_raid3_uma_ctor(void *mem, int size, void *arg, int flags) 2113650be51SPawel Jakub Dawidek { 2123650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2133650be51SPawel Jakub Dawidek 2140d14fae5SPawel Jakub Dawidek if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max) 2153650be51SPawel Jakub Dawidek return (ENOMEM); 2163650be51SPawel Jakub Dawidek sz->sz_inuse++; 2173650be51SPawel Jakub Dawidek return (0); 2183650be51SPawel Jakub Dawidek } 2193650be51SPawel Jakub Dawidek 2203650be51SPawel Jakub Dawidek static void 2213650be51SPawel Jakub Dawidek g_raid3_uma_dtor(void *mem, int size, void *arg) 2223650be51SPawel Jakub Dawidek { 2233650be51SPawel Jakub Dawidek struct g_raid3_zone *sz = arg; 2243650be51SPawel Jakub Dawidek 2253650be51SPawel Jakub Dawidek sz->sz_inuse--; 2263650be51SPawel Jakub Dawidek } 2273650be51SPawel Jakub Dawidek 22806b215fdSAlexander Motin #define g_raid3_xor(src, dst, size) \ 22906b215fdSAlexander Motin _g_raid3_xor((uint64_t *)(src), \ 2302d1661a5SPawel Jakub Dawidek (uint64_t *)(dst), (size_t)size) 2312d1661a5SPawel Jakub Dawidek static void 23206b215fdSAlexander Motin _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size) 2332d1661a5SPawel Jakub Dawidek { 2342d1661a5SPawel Jakub Dawidek 2352d1661a5SPawel Jakub Dawidek KASSERT((size % 128) == 0, ("Invalid size: %zu.", size)); 2362d1661a5SPawel Jakub Dawidek for (; size > 0; size -= 128) { 23706b215fdSAlexander Motin *dst++ ^= (*src++); 23806b215fdSAlexander Motin *dst++ ^= (*src++); 23906b215fdSAlexander 
Motin *dst++ ^= (*src++); 24006b215fdSAlexander Motin *dst++ ^= (*src++); 24106b215fdSAlexander Motin *dst++ ^= (*src++); 24206b215fdSAlexander Motin *dst++ ^= (*src++); 24306b215fdSAlexander Motin *dst++ ^= (*src++); 24406b215fdSAlexander Motin *dst++ ^= (*src++); 24506b215fdSAlexander Motin *dst++ ^= (*src++); 24606b215fdSAlexander Motin *dst++ ^= (*src++); 24706b215fdSAlexander Motin *dst++ ^= (*src++); 24806b215fdSAlexander Motin *dst++ ^= (*src++); 24906b215fdSAlexander Motin *dst++ ^= (*src++); 25006b215fdSAlexander Motin *dst++ ^= (*src++); 25106b215fdSAlexander Motin *dst++ ^= (*src++); 25206b215fdSAlexander Motin *dst++ ^= (*src++); 2532d1661a5SPawel Jakub Dawidek } 2542d1661a5SPawel Jakub Dawidek } 2552d1661a5SPawel Jakub Dawidek 256dba915cfSPawel Jakub Dawidek static int 257dba915cfSPawel Jakub Dawidek g_raid3_is_zero(struct bio *bp) 258dba915cfSPawel Jakub Dawidek { 259dba915cfSPawel Jakub Dawidek static const uint64_t zeros[] = { 260dba915cfSPawel Jakub Dawidek 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 261dba915cfSPawel Jakub Dawidek }; 262dba915cfSPawel Jakub Dawidek u_char *addr; 263dba915cfSPawel Jakub Dawidek ssize_t size; 264dba915cfSPawel Jakub Dawidek 265dba915cfSPawel Jakub Dawidek size = bp->bio_length; 266dba915cfSPawel Jakub Dawidek addr = (u_char *)bp->bio_data; 267dba915cfSPawel Jakub Dawidek for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) { 268dba915cfSPawel Jakub Dawidek if (bcmp(addr, zeros, sizeof(zeros)) != 0) 269dba915cfSPawel Jakub Dawidek return (0); 270dba915cfSPawel Jakub Dawidek } 271dba915cfSPawel Jakub Dawidek return (1); 272dba915cfSPawel Jakub Dawidek } 273dba915cfSPawel Jakub Dawidek 2742d1661a5SPawel Jakub Dawidek /* 2752d1661a5SPawel Jakub Dawidek * --- Events handling functions --- 2762d1661a5SPawel Jakub Dawidek * Events in geom_raid3 are used to maintain disks and device status 2772d1661a5SPawel Jakub Dawidek * from one thread to simplify locking. 
2782d1661a5SPawel Jakub Dawidek */ 2792d1661a5SPawel Jakub Dawidek static void 2802d1661a5SPawel Jakub Dawidek g_raid3_event_free(struct g_raid3_event *ep) 2812d1661a5SPawel Jakub Dawidek { 2822d1661a5SPawel Jakub Dawidek 2832d1661a5SPawel Jakub Dawidek free(ep, M_RAID3); 2842d1661a5SPawel Jakub Dawidek } 2852d1661a5SPawel Jakub Dawidek 2862d1661a5SPawel Jakub Dawidek int 2872d1661a5SPawel Jakub Dawidek g_raid3_event_send(void *arg, int state, int flags) 2882d1661a5SPawel Jakub Dawidek { 2892d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 2902d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 2912d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 2922d1661a5SPawel Jakub Dawidek int error; 2932d1661a5SPawel Jakub Dawidek 2942d1661a5SPawel Jakub Dawidek ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK); 2952d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep); 2962d1661a5SPawel Jakub Dawidek if ((flags & G_RAID3_EVENT_DEVICE) != 0) { 2972d1661a5SPawel Jakub Dawidek disk = NULL; 2982d1661a5SPawel Jakub Dawidek sc = arg; 2992d1661a5SPawel Jakub Dawidek } else { 3002d1661a5SPawel Jakub Dawidek disk = arg; 3012d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3022d1661a5SPawel Jakub Dawidek } 3032d1661a5SPawel Jakub Dawidek ep->e_disk = disk; 3042d1661a5SPawel Jakub Dawidek ep->e_state = state; 3052d1661a5SPawel Jakub Dawidek ep->e_flags = flags; 3062d1661a5SPawel Jakub Dawidek ep->e_error = 0; 3072d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3082d1661a5SPawel Jakub Dawidek TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 3092d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3102d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3112d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 3122d1661a5SPawel Jakub Dawidek wakeup(sc); 3132d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 3142d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 3152d1661a5SPawel Jakub Dawidek if 
((flags & G_RAID3_EVENT_DONTWAIT) != 0) 3162d1661a5SPawel Jakub Dawidek return (0); 3173650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3182d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 3193650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3202d1661a5SPawel Jakub Dawidek while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) { 3212d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3222d1661a5SPawel Jakub Dawidek MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event", 3232d1661a5SPawel Jakub Dawidek hz * 5); 3242d1661a5SPawel Jakub Dawidek } 3252d1661a5SPawel Jakub Dawidek error = ep->e_error; 3262d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3273650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3282d1661a5SPawel Jakub Dawidek return (error); 3292d1661a5SPawel Jakub Dawidek } 3302d1661a5SPawel Jakub Dawidek 3312d1661a5SPawel Jakub Dawidek static struct g_raid3_event * 3322d1661a5SPawel Jakub Dawidek g_raid3_event_get(struct g_raid3_softc *sc) 3332d1661a5SPawel Jakub Dawidek { 3342d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 3352d1661a5SPawel Jakub Dawidek 3362d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3372d1661a5SPawel Jakub Dawidek ep = TAILQ_FIRST(&sc->sc_events); 3382d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3392d1661a5SPawel Jakub Dawidek return (ep); 3402d1661a5SPawel Jakub Dawidek } 3412d1661a5SPawel Jakub Dawidek 3422d1661a5SPawel Jakub Dawidek static void 343d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep) 344d97d5ee9SPawel Jakub Dawidek { 345d97d5ee9SPawel Jakub Dawidek 346d97d5ee9SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 347d97d5ee9SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 348d97d5ee9SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 349d97d5ee9SPawel Jakub Dawidek } 350d97d5ee9SPawel Jakub Dawidek 351d97d5ee9SPawel Jakub Dawidek static void 3522d1661a5SPawel Jakub 
Dawidek g_raid3_event_cancel(struct g_raid3_disk *disk) 3532d1661a5SPawel Jakub Dawidek { 3542d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3552d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep, *tmpep; 3562d1661a5SPawel Jakub Dawidek 3572d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 3583650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 3593650be51SPawel Jakub Dawidek 3602d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 3612d1661a5SPawel Jakub Dawidek TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 3622d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) 3632d1661a5SPawel Jakub Dawidek continue; 3642d1661a5SPawel Jakub Dawidek if (ep->e_disk != disk) 3652d1661a5SPawel Jakub Dawidek continue; 3662d1661a5SPawel Jakub Dawidek TAILQ_REMOVE(&sc->sc_events, ep, e_next); 3672d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 3682d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 3692d1661a5SPawel Jakub Dawidek else { 3702d1661a5SPawel Jakub Dawidek ep->e_error = ECANCELED; 3712d1661a5SPawel Jakub Dawidek wakeup(ep); 3722d1661a5SPawel Jakub Dawidek } 3732d1661a5SPawel Jakub Dawidek } 3742d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 3752d1661a5SPawel Jakub Dawidek } 3762d1661a5SPawel Jakub Dawidek 3772d1661a5SPawel Jakub Dawidek /* 3782d1661a5SPawel Jakub Dawidek * Return the number of disks in the given state. 3792d1661a5SPawel Jakub Dawidek * If state is equal to -1, count all connected disks. 3802d1661a5SPawel Jakub Dawidek */ 3812d1661a5SPawel Jakub Dawidek u_int 3822d1661a5SPawel Jakub Dawidek g_raid3_ndisks(struct g_raid3_softc *sc, int state) 3832d1661a5SPawel Jakub Dawidek { 3842d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 385fa6a7837SDavid E. O'Brien u_int n, ndisks; 3862d1661a5SPawel Jakub Dawidek 3873650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 3883650be51SPawel Jakub Dawidek 389fa6a7837SDavid E. 
O'Brien for (n = ndisks = 0; n < sc->sc_ndisks; n++) { 3902d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 3912d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 3922d1661a5SPawel Jakub Dawidek continue; 3932d1661a5SPawel Jakub Dawidek if (state == -1 || disk->d_state == state) 3942d1661a5SPawel Jakub Dawidek ndisks++; 3952d1661a5SPawel Jakub Dawidek } 3962d1661a5SPawel Jakub Dawidek return (ndisks); 3972d1661a5SPawel Jakub Dawidek } 3982d1661a5SPawel Jakub Dawidek 3992d1661a5SPawel Jakub Dawidek static u_int 4002d1661a5SPawel Jakub Dawidek g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp) 4012d1661a5SPawel Jakub Dawidek { 4022d1661a5SPawel Jakub Dawidek struct bio *bp; 4032d1661a5SPawel Jakub Dawidek u_int nreqs = 0; 4042d1661a5SPawel Jakub Dawidek 4052d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 4062d1661a5SPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 4072d1661a5SPawel Jakub Dawidek if (bp->bio_from == cp) 4082d1661a5SPawel Jakub Dawidek nreqs++; 4092d1661a5SPawel Jakub Dawidek } 4102d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 4112d1661a5SPawel Jakub Dawidek return (nreqs); 4122d1661a5SPawel Jakub Dawidek } 4132d1661a5SPawel Jakub Dawidek 4142d1661a5SPawel Jakub Dawidek static int 4152d1661a5SPawel Jakub Dawidek g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp) 4162d1661a5SPawel Jakub Dawidek { 4172d1661a5SPawel Jakub Dawidek 41879e61493SPawel Jakub Dawidek if (cp->index > 0) { 4192d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, 4202d1661a5SPawel Jakub Dawidek "I/O requests for %s exist, can't destroy it now.", 4212d1661a5SPawel Jakub Dawidek cp->provider->name); 4222d1661a5SPawel Jakub Dawidek return (1); 4232d1661a5SPawel Jakub Dawidek } 4242d1661a5SPawel Jakub Dawidek if (g_raid3_nrequests(sc, cp) > 0) { 4252d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, 4262d1661a5SPawel Jakub Dawidek "I/O requests for %s in queue, can't destroy it now.", 
4272d1661a5SPawel Jakub Dawidek cp->provider->name); 4282d1661a5SPawel Jakub Dawidek return (1); 4292d1661a5SPawel Jakub Dawidek } 4302d1661a5SPawel Jakub Dawidek return (0); 4312d1661a5SPawel Jakub Dawidek } 4322d1661a5SPawel Jakub Dawidek 4332d1661a5SPawel Jakub Dawidek static void 434d97d5ee9SPawel Jakub Dawidek g_raid3_destroy_consumer(void *arg, int flags __unused) 435d97d5ee9SPawel Jakub Dawidek { 436d97d5ee9SPawel Jakub Dawidek struct g_consumer *cp; 437d97d5ee9SPawel Jakub Dawidek 4383650be51SPawel Jakub Dawidek g_topology_assert(); 4393650be51SPawel Jakub Dawidek 440d97d5ee9SPawel Jakub Dawidek cp = arg; 441d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 442d97d5ee9SPawel Jakub Dawidek g_detach(cp); 443d97d5ee9SPawel Jakub Dawidek g_destroy_consumer(cp); 444d97d5ee9SPawel Jakub Dawidek } 445d97d5ee9SPawel Jakub Dawidek 446d97d5ee9SPawel Jakub Dawidek static void 4472d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 4482d1661a5SPawel Jakub Dawidek { 449d97d5ee9SPawel Jakub Dawidek struct g_provider *pp; 450d97d5ee9SPawel Jakub Dawidek int retaste_wait; 4512d1661a5SPawel Jakub Dawidek 4522d1661a5SPawel Jakub Dawidek g_topology_assert(); 4532d1661a5SPawel Jakub Dawidek 4542d1661a5SPawel Jakub Dawidek cp->private = NULL; 4552d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 4562d1661a5SPawel Jakub Dawidek return; 4572d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name); 458d97d5ee9SPawel Jakub Dawidek pp = cp->provider; 459d97d5ee9SPawel Jakub Dawidek retaste_wait = 0; 460d97d5ee9SPawel Jakub Dawidek if (cp->acw == 1) { 461d97d5ee9SPawel Jakub Dawidek if ((pp->geom->flags & G_GEOM_WITHER) == 0) 462d97d5ee9SPawel Jakub Dawidek retaste_wait = 1; 463d97d5ee9SPawel Jakub Dawidek } 464d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 465d97d5ee9SPawel Jakub Dawidek -cp->acw, -cp->ace, 
0); 466d97d5ee9SPawel Jakub Dawidek if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 467d97d5ee9SPawel Jakub Dawidek g_access(cp, -cp->acr, -cp->acw, -cp->ace); 468d97d5ee9SPawel Jakub Dawidek if (retaste_wait) { 469d97d5ee9SPawel Jakub Dawidek /* 470d97d5ee9SPawel Jakub Dawidek * After retaste event was send (inside g_access()), we can send 471d97d5ee9SPawel Jakub Dawidek * event to detach and destroy consumer. 472d97d5ee9SPawel Jakub Dawidek * A class, which has consumer to the given provider connected 473d97d5ee9SPawel Jakub Dawidek * will not receive retaste event for the provider. 474d97d5ee9SPawel Jakub Dawidek * This is the way how I ignore retaste events when I close 475d97d5ee9SPawel Jakub Dawidek * consumers opened for write: I detach and destroy consumer 476d97d5ee9SPawel Jakub Dawidek * after retaste event is sent. 477d97d5ee9SPawel Jakub Dawidek */ 478d97d5ee9SPawel Jakub Dawidek g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL); 479d97d5ee9SPawel Jakub Dawidek return; 480d97d5ee9SPawel Jakub Dawidek } 481d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name); 4822d1661a5SPawel Jakub Dawidek g_detach(cp); 4832d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 4842d1661a5SPawel Jakub Dawidek } 4852d1661a5SPawel Jakub Dawidek 4862d1661a5SPawel Jakub Dawidek static int 4872d1661a5SPawel Jakub Dawidek g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp) 4882d1661a5SPawel Jakub Dawidek { 48934cb1517SPawel Jakub Dawidek struct g_consumer *cp; 4902d1661a5SPawel Jakub Dawidek int error; 4912d1661a5SPawel Jakub Dawidek 4923650be51SPawel Jakub Dawidek g_topology_assert_not(); 4932d1661a5SPawel Jakub Dawidek KASSERT(disk->d_consumer == NULL, 4942d1661a5SPawel Jakub Dawidek ("Disk already connected (device %s).", disk->d_softc->sc_name)); 4952d1661a5SPawel Jakub Dawidek 4963650be51SPawel Jakub Dawidek g_topology_lock(); 49734cb1517SPawel Jakub Dawidek cp = g_new_consumer(disk->d_softc->sc_geom); 
49834cb1517SPawel Jakub Dawidek error = g_attach(cp, pp); 499d97d5ee9SPawel Jakub Dawidek if (error != 0) { 50034cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 5013650be51SPawel Jakub Dawidek g_topology_unlock(); 50234cb1517SPawel Jakub Dawidek return (error); 50334cb1517SPawel Jakub Dawidek } 50434cb1517SPawel Jakub Dawidek error = g_access(cp, 1, 1, 1); 5053650be51SPawel Jakub Dawidek g_topology_unlock(); 50634cb1517SPawel Jakub Dawidek if (error != 0) { 50734cb1517SPawel Jakub Dawidek g_detach(cp); 50834cb1517SPawel Jakub Dawidek g_destroy_consumer(cp); 509d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).", 510d97d5ee9SPawel Jakub Dawidek pp->name, error); 511d97d5ee9SPawel Jakub Dawidek return (error); 512d97d5ee9SPawel Jakub Dawidek } 51334cb1517SPawel Jakub Dawidek disk->d_consumer = cp; 51434cb1517SPawel Jakub Dawidek disk->d_consumer->private = disk; 51534cb1517SPawel Jakub Dawidek disk->d_consumer->index = 0; 5162d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk)); 5172d1661a5SPawel Jakub Dawidek return (0); 5182d1661a5SPawel Jakub Dawidek } 5192d1661a5SPawel Jakub Dawidek 5202d1661a5SPawel Jakub Dawidek static void 5212d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 5222d1661a5SPawel Jakub Dawidek { 5232d1661a5SPawel Jakub Dawidek 5242d1661a5SPawel Jakub Dawidek g_topology_assert(); 5252d1661a5SPawel Jakub Dawidek 5262d1661a5SPawel Jakub Dawidek if (cp == NULL) 5272d1661a5SPawel Jakub Dawidek return; 528d97d5ee9SPawel Jakub Dawidek if (cp->provider != NULL) 5292d1661a5SPawel Jakub Dawidek g_raid3_kill_consumer(sc, cp); 530d97d5ee9SPawel Jakub Dawidek else 5312d1661a5SPawel Jakub Dawidek g_destroy_consumer(cp); 5322d1661a5SPawel Jakub Dawidek } 5332d1661a5SPawel Jakub Dawidek 5342d1661a5SPawel Jakub Dawidek /* 5352d1661a5SPawel Jakub Dawidek * Initialize disk. 
This means allocate memory, create consumer, attach it 5362d1661a5SPawel Jakub Dawidek * to the provider and open access (r1w1e1) to it. 5372d1661a5SPawel Jakub Dawidek */ 5382d1661a5SPawel Jakub Dawidek static struct g_raid3_disk * 5392d1661a5SPawel Jakub Dawidek g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp, 5402d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md, int *errorp) 5412d1661a5SPawel Jakub Dawidek { 5422d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 5432d1661a5SPawel Jakub Dawidek int error; 5442d1661a5SPawel Jakub Dawidek 5452d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[md->md_no]; 5462d1661a5SPawel Jakub Dawidek error = g_raid3_connect_disk(disk, pp); 54734cb1517SPawel Jakub Dawidek if (error != 0) { 54834cb1517SPawel Jakub Dawidek if (errorp != NULL) 54934cb1517SPawel Jakub Dawidek *errorp = error; 55034cb1517SPawel Jakub Dawidek return (NULL); 55134cb1517SPawel Jakub Dawidek } 5522d1661a5SPawel Jakub Dawidek disk->d_state = G_RAID3_DISK_STATE_NONE; 5532d1661a5SPawel Jakub Dawidek disk->d_flags = md->md_dflags; 5542d1661a5SPawel Jakub Dawidek if (md->md_provider[0] != '\0') 5552d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED; 5562d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer = NULL; 5572d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = md->md_sync_offset; 5582d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = md->md_sync_offset; 559a245a548SPawel Jakub Dawidek disk->d_genid = md->md_genid; 5602d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = md->md_syncid; 5612d1661a5SPawel Jakub Dawidek if (errorp != NULL) 5622d1661a5SPawel Jakub Dawidek *errorp = 0; 5632d1661a5SPawel Jakub Dawidek return (disk); 5642d1661a5SPawel Jakub Dawidek } 5652d1661a5SPawel Jakub Dawidek 5662d1661a5SPawel Jakub Dawidek static void 5672d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(struct g_raid3_disk *disk) 5682d1661a5SPawel Jakub Dawidek { 5692d1661a5SPawel Jakub Dawidek struct g_raid3_softc 
*sc; 5702d1661a5SPawel Jakub Dawidek 5713650be51SPawel Jakub Dawidek g_topology_assert_not(); 5723650be51SPawel Jakub Dawidek sc = disk->d_softc; 5733650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 5742d1661a5SPawel Jakub Dawidek 5752d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 5762d1661a5SPawel Jakub Dawidek return; 5772d1661a5SPawel Jakub Dawidek g_raid3_event_cancel(disk); 5782d1661a5SPawel Jakub Dawidek switch (disk->d_state) { 5792d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 5802d1661a5SPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 5812d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 5822d1661a5SPawel Jakub Dawidek /* FALLTHROUGH */ 5832d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 5842d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 5852d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 5863650be51SPawel Jakub Dawidek g_topology_lock(); 5872d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(sc, disk->d_consumer); 5883650be51SPawel Jakub Dawidek g_topology_unlock(); 5892d1661a5SPawel Jakub Dawidek disk->d_consumer = NULL; 5902d1661a5SPawel Jakub Dawidek break; 5912d1661a5SPawel Jakub Dawidek default: 5922d1661a5SPawel Jakub Dawidek KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 5932d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 5942d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 5952d1661a5SPawel Jakub Dawidek } 5962d1661a5SPawel Jakub Dawidek disk->d_state = G_RAID3_DISK_STATE_NODISK; 5972d1661a5SPawel Jakub Dawidek } 5982d1661a5SPawel Jakub Dawidek 5992d1661a5SPawel Jakub Dawidek static void 6002d1661a5SPawel Jakub Dawidek g_raid3_destroy_device(struct g_raid3_softc *sc) 6012d1661a5SPawel Jakub Dawidek { 6022d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 6039da3072cSPawel Jakub Dawidek struct g_raid3_disk *disk; 6042d1661a5SPawel Jakub Dawidek struct g_geom *gp; 6052d1661a5SPawel Jakub Dawidek struct g_consumer 
*cp; 6062d1661a5SPawel Jakub Dawidek u_int n; 6072d1661a5SPawel Jakub Dawidek 6083650be51SPawel Jakub Dawidek g_topology_assert_not(); 6093650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 6102d1661a5SPawel Jakub Dawidek 6112d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 6122d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) 6132d1661a5SPawel Jakub Dawidek g_raid3_destroy_provider(sc); 6149da3072cSPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 6159da3072cSPawel Jakub Dawidek disk = &sc->sc_disks[n]; 616d97d5ee9SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_NODISK) { 6179da3072cSPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 6189da3072cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 6199da3072cSPawel Jakub Dawidek g_raid3_destroy_disk(disk); 6209da3072cSPawel Jakub Dawidek } 621d97d5ee9SPawel Jakub Dawidek } 6222d1661a5SPawel Jakub Dawidek while ((ep = g_raid3_event_get(sc)) != NULL) { 623d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 6242d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 6252d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 6262d1661a5SPawel Jakub Dawidek else { 6272d1661a5SPawel Jakub Dawidek ep->e_error = ECANCELED; 6282d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE; 6292d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep); 6302d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 6312d1661a5SPawel Jakub Dawidek wakeup(ep); 6322d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 6332d1661a5SPawel Jakub Dawidek } 6342d1661a5SPawel Jakub Dawidek } 6352d1661a5SPawel Jakub Dawidek callout_drain(&sc->sc_callout); 6362d1661a5SPawel Jakub Dawidek cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer); 6373650be51SPawel Jakub Dawidek g_topology_lock(); 6382d1661a5SPawel Jakub Dawidek if (cp != NULL) 6392d1661a5SPawel Jakub Dawidek g_raid3_disconnect_consumer(sc, cp); 6402d1661a5SPawel Jakub Dawidek 
g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 6412d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name); 6422d1661a5SPawel Jakub Dawidek g_wither_geom(gp, ENXIO); 6433650be51SPawel Jakub Dawidek g_topology_unlock(); 644ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 6453650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 6463650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 6473650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 648ed940a82SPawel Jakub Dawidek } 6493650be51SPawel Jakub Dawidek mtx_destroy(&sc->sc_queue_mtx); 6503650be51SPawel Jakub Dawidek mtx_destroy(&sc->sc_events_mtx); 6513650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 6523650be51SPawel Jakub Dawidek sx_destroy(&sc->sc_lock); 6532d1661a5SPawel Jakub Dawidek } 6542d1661a5SPawel Jakub Dawidek 6552d1661a5SPawel Jakub Dawidek static void 6562d1661a5SPawel Jakub Dawidek g_raid3_orphan(struct g_consumer *cp) 6572d1661a5SPawel Jakub Dawidek { 6582d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 6592d1661a5SPawel Jakub Dawidek 6602d1661a5SPawel Jakub Dawidek g_topology_assert(); 6612d1661a5SPawel Jakub Dawidek 6622d1661a5SPawel Jakub Dawidek disk = cp->private; 6632d1661a5SPawel Jakub Dawidek if (disk == NULL) 6642d1661a5SPawel Jakub Dawidek return; 665ea973705SPawel Jakub Dawidek disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID; 6662d1661a5SPawel Jakub Dawidek g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED, 6672d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 6682d1661a5SPawel Jakub Dawidek } 6692d1661a5SPawel Jakub Dawidek 6702d1661a5SPawel Jakub Dawidek static int 6712d1661a5SPawel Jakub Dawidek g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 6722d1661a5SPawel Jakub Dawidek { 6732d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 6742d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 6752d1661a5SPawel Jakub Dawidek off_t 
offset, length; 6762d1661a5SPawel Jakub Dawidek u_char *sector; 677d97d5ee9SPawel Jakub Dawidek int error = 0; 6782d1661a5SPawel Jakub Dawidek 6793650be51SPawel Jakub Dawidek g_topology_assert_not(); 6802d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 6813650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 6823650be51SPawel Jakub Dawidek 6832d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 6842d1661a5SPawel Jakub Dawidek KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 6852d1661a5SPawel Jakub Dawidek KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 6863650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 687d97d5ee9SPawel Jakub Dawidek ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 688d97d5ee9SPawel Jakub Dawidek cp->acw, cp->ace)); 6892d1661a5SPawel Jakub Dawidek length = cp->provider->sectorsize; 6902d1661a5SPawel Jakub Dawidek offset = cp->provider->mediasize - length; 6912d1661a5SPawel Jakub Dawidek sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO); 6922d1661a5SPawel Jakub Dawidek if (md != NULL) 6932d1661a5SPawel Jakub Dawidek raid3_metadata_encode(md, sector); 6942d1661a5SPawel Jakub Dawidek error = g_write_data(cp, offset, sector, length); 6952d1661a5SPawel Jakub Dawidek free(sector, M_RAID3); 6962d1661a5SPawel Jakub Dawidek if (error != 0) { 6973aae74ecSPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 6983aae74ecSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot write metadata on %s " 6993aae74ecSPawel Jakub Dawidek "(device=%s, error=%d).", 7003aae74ecSPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name, error); 7013aae74ecSPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 7023aae74ecSPawel Jakub Dawidek } else { 7033aae74ecSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot write metadata on %s " 7043aae74ecSPawel Jakub Dawidek "(device=%s, error=%d).", 7053aae74ecSPawel Jakub Dawidek g_raid3_get_diskname(disk), 
sc->sc_name, error); 7063aae74ecSPawel Jakub Dawidek } 7073aae74ecSPawel Jakub Dawidek if (g_raid3_disconnect_on_failure && 7083aae74ecSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 7093aae74ecSPawel Jakub Dawidek sc->sc_bump_id |= G_RAID3_BUMP_GENID; 7103aae74ecSPawel Jakub Dawidek g_raid3_event_send(disk, 7113aae74ecSPawel Jakub Dawidek G_RAID3_DISK_STATE_DISCONNECTED, 7122d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_DONTWAIT); 7132d1661a5SPawel Jakub Dawidek } 7143aae74ecSPawel Jakub Dawidek } 7152d1661a5SPawel Jakub Dawidek return (error); 7162d1661a5SPawel Jakub Dawidek } 7172d1661a5SPawel Jakub Dawidek 7182d1661a5SPawel Jakub Dawidek int 7192d1661a5SPawel Jakub Dawidek g_raid3_clear_metadata(struct g_raid3_disk *disk) 7202d1661a5SPawel Jakub Dawidek { 7212d1661a5SPawel Jakub Dawidek int error; 7222d1661a5SPawel Jakub Dawidek 7233650be51SPawel Jakub Dawidek g_topology_assert_not(); 7243650be51SPawel Jakub Dawidek sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 7253650be51SPawel Jakub Dawidek 7262d1661a5SPawel Jakub Dawidek error = g_raid3_write_metadata(disk, NULL); 7272d1661a5SPawel Jakub Dawidek if (error == 0) { 7282d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Metadata on %s cleared.", 7292d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 7302d1661a5SPawel Jakub Dawidek } else { 7312d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 7322d1661a5SPawel Jakub Dawidek "Cannot clear metadata on disk %s (error=%d).", 7332d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), error); 7342d1661a5SPawel Jakub Dawidek } 7352d1661a5SPawel Jakub Dawidek return (error); 7362d1661a5SPawel Jakub Dawidek } 7372d1661a5SPawel Jakub Dawidek 7382d1661a5SPawel Jakub Dawidek void 7392d1661a5SPawel Jakub Dawidek g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 7402d1661a5SPawel Jakub Dawidek { 7412d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 742e6890985SPawel Jakub Dawidek struct g_provider *pp; 7432d1661a5SPawel Jakub 
Dawidek 7442d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 7452d1661a5SPawel Jakub Dawidek strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic)); 7462d1661a5SPawel Jakub Dawidek md->md_version = G_RAID3_VERSION; 7472d1661a5SPawel Jakub Dawidek strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 7482d1661a5SPawel Jakub Dawidek md->md_id = sc->sc_id; 7492d1661a5SPawel Jakub Dawidek md->md_all = sc->sc_ndisks; 750a245a548SPawel Jakub Dawidek md->md_genid = sc->sc_genid; 7512d1661a5SPawel Jakub Dawidek md->md_mediasize = sc->sc_mediasize; 7522d1661a5SPawel Jakub Dawidek md->md_sectorsize = sc->sc_sectorsize; 7532d1661a5SPawel Jakub Dawidek md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK); 7542d1661a5SPawel Jakub Dawidek md->md_no = disk->d_no; 7552d1661a5SPawel Jakub Dawidek md->md_syncid = disk->d_sync.ds_syncid; 7562d1661a5SPawel Jakub Dawidek md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK); 757c082905bSPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 7582d1661a5SPawel Jakub Dawidek md->md_sync_offset = 0; 759c082905bSPawel Jakub Dawidek else { 760c082905bSPawel Jakub Dawidek md->md_sync_offset = 761c082905bSPawel Jakub Dawidek disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1); 762c082905bSPawel Jakub Dawidek } 763e6890985SPawel Jakub Dawidek if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL) 764e6890985SPawel Jakub Dawidek pp = disk->d_consumer->provider; 765e6890985SPawel Jakub Dawidek else 766e6890985SPawel Jakub Dawidek pp = NULL; 767e6890985SPawel Jakub Dawidek if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL) 768e6890985SPawel Jakub Dawidek strlcpy(md->md_provider, pp->name, sizeof(md->md_provider)); 769e6890985SPawel Jakub Dawidek else 7702d1661a5SPawel Jakub Dawidek bzero(md->md_provider, sizeof(md->md_provider)); 771e6890985SPawel Jakub Dawidek if (pp != NULL) 772e6890985SPawel Jakub Dawidek md->md_provsize = pp->mediasize; 773e6890985SPawel Jakub Dawidek else 
774e6890985SPawel Jakub Dawidek md->md_provsize = 0; 7752d1661a5SPawel Jakub Dawidek } 7762d1661a5SPawel Jakub Dawidek 7772d1661a5SPawel Jakub Dawidek void 7782d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(struct g_raid3_disk *disk) 7792d1661a5SPawel Jakub Dawidek { 7803650be51SPawel Jakub Dawidek struct g_raid3_softc *sc; 7812d1661a5SPawel Jakub Dawidek struct g_raid3_metadata md; 7822d1661a5SPawel Jakub Dawidek int error; 7832d1661a5SPawel Jakub Dawidek 7843650be51SPawel Jakub Dawidek g_topology_assert_not(); 7853650be51SPawel Jakub Dawidek sc = disk->d_softc; 7863650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 7873650be51SPawel Jakub Dawidek 7882d1661a5SPawel Jakub Dawidek g_raid3_fill_metadata(disk, &md); 7892d1661a5SPawel Jakub Dawidek error = g_raid3_write_metadata(disk, &md); 7902d1661a5SPawel Jakub Dawidek if (error == 0) { 7912d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Metadata on %s updated.", 7922d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 7932d1661a5SPawel Jakub Dawidek } else { 7942d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, 7952d1661a5SPawel Jakub Dawidek "Cannot update metadata on disk %s (error=%d).", 7962d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), error); 7972d1661a5SPawel Jakub Dawidek } 7982d1661a5SPawel Jakub Dawidek } 7992d1661a5SPawel Jakub Dawidek 8002d1661a5SPawel Jakub Dawidek static void 801d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(struct g_raid3_softc *sc) 8022d1661a5SPawel Jakub Dawidek { 8032d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 8042d1661a5SPawel Jakub Dawidek u_int n; 8052d1661a5SPawel Jakub Dawidek 8063650be51SPawel Jakub Dawidek g_topology_assert_not(); 8073650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8082d1661a5SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 8092d1661a5SPawel Jakub Dawidek ("%s called with no active disks (device=%s).", __func__, 8102d1661a5SPawel Jakub Dawidek sc->sc_name)); 
8112d1661a5SPawel Jakub Dawidek 8122d1661a5SPawel Jakub Dawidek sc->sc_syncid++; 813a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 814a245a548SPawel Jakub Dawidek sc->sc_syncid); 8152d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 8162d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 8172d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 8182d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 8192d1661a5SPawel Jakub Dawidek disk->d_sync.ds_syncid = sc->sc_syncid; 8202d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 8212d1661a5SPawel Jakub Dawidek } 8222d1661a5SPawel Jakub Dawidek } 8232d1661a5SPawel Jakub Dawidek } 8242d1661a5SPawel Jakub Dawidek 8254d006a98SPawel Jakub Dawidek static void 826a245a548SPawel Jakub Dawidek g_raid3_bump_genid(struct g_raid3_softc *sc) 827a245a548SPawel Jakub Dawidek { 828a245a548SPawel Jakub Dawidek struct g_raid3_disk *disk; 829a245a548SPawel Jakub Dawidek u_int n; 830a245a548SPawel Jakub Dawidek 8313650be51SPawel Jakub Dawidek g_topology_assert_not(); 8323650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 833a245a548SPawel Jakub Dawidek KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 834a245a548SPawel Jakub Dawidek ("%s called with no active disks (device=%s).", __func__, 835a245a548SPawel Jakub Dawidek sc->sc_name)); 836a245a548SPawel Jakub Dawidek 837a245a548SPawel Jakub Dawidek sc->sc_genid++; 838a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 839a245a548SPawel Jakub Dawidek sc->sc_genid); 840a245a548SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 841a245a548SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 842a245a548SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 843a245a548SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 844a245a548SPawel Jakub Dawidek disk->d_genid = 
sc->sc_genid; 845a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 846a245a548SPawel Jakub Dawidek } 847a245a548SPawel Jakub Dawidek } 848a245a548SPawel Jakub Dawidek } 849a245a548SPawel Jakub Dawidek 8500962f942SPawel Jakub Dawidek static int 8513650be51SPawel Jakub Dawidek g_raid3_idle(struct g_raid3_softc *sc, int acw) 8524d006a98SPawel Jakub Dawidek { 8534d006a98SPawel Jakub Dawidek struct g_raid3_disk *disk; 8544d006a98SPawel Jakub Dawidek u_int i; 8550962f942SPawel Jakub Dawidek int timeout; 8564d006a98SPawel Jakub Dawidek 8573650be51SPawel Jakub Dawidek g_topology_assert_not(); 8583650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8593650be51SPawel Jakub Dawidek 8600962f942SPawel Jakub Dawidek if (sc->sc_provider == NULL) 8610962f942SPawel Jakub Dawidek return (0); 862501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 863501250baSPawel Jakub Dawidek return (0); 8640962f942SPawel Jakub Dawidek if (sc->sc_idle) 8650962f942SPawel Jakub Dawidek return (0); 8660962f942SPawel Jakub Dawidek if (sc->sc_writes > 0) 8670962f942SPawel Jakub Dawidek return (0); 8683650be51SPawel Jakub Dawidek if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 86901f1f41cSPawel Jakub Dawidek timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write); 870f62c1a47SAlexander Motin if (!g_raid3_shutdown && timeout > 0) 8710962f942SPawel Jakub Dawidek return (timeout); 8720962f942SPawel Jakub Dawidek } 8734d006a98SPawel Jakub Dawidek sc->sc_idle = 1; 8744d006a98SPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 8754d006a98SPawel Jakub Dawidek disk = &sc->sc_disks[i]; 8764d006a98SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 8774d006a98SPawel Jakub Dawidek continue; 8784d006a98SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 8794d006a98SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 8804d006a98SPawel Jakub Dawidek disk->d_flags &= 
~G_RAID3_DISK_FLAG_DIRTY; 8814d006a98SPawel Jakub Dawidek g_raid3_update_metadata(disk); 8824d006a98SPawel Jakub Dawidek } 8830962f942SPawel Jakub Dawidek return (0); 8844d006a98SPawel Jakub Dawidek } 8854d006a98SPawel Jakub Dawidek 8864d006a98SPawel Jakub Dawidek static void 8874d006a98SPawel Jakub Dawidek g_raid3_unidle(struct g_raid3_softc *sc) 8884d006a98SPawel Jakub Dawidek { 8894d006a98SPawel Jakub Dawidek struct g_raid3_disk *disk; 8904d006a98SPawel Jakub Dawidek u_int i; 8914d006a98SPawel Jakub Dawidek 8923650be51SPawel Jakub Dawidek g_topology_assert_not(); 8933650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 8943650be51SPawel Jakub Dawidek 895501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 896501250baSPawel Jakub Dawidek return; 8974d006a98SPawel Jakub Dawidek sc->sc_idle = 0; 89801f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 8994d006a98SPawel Jakub Dawidek for (i = 0; i < sc->sc_ndisks; i++) { 9004d006a98SPawel Jakub Dawidek disk = &sc->sc_disks[i]; 9014d006a98SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 9024d006a98SPawel Jakub Dawidek continue; 9034d006a98SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 9044d006a98SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 9054d006a98SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 9064d006a98SPawel Jakub Dawidek g_raid3_update_metadata(disk); 9074d006a98SPawel Jakub Dawidek } 9084d006a98SPawel Jakub Dawidek } 9094d006a98SPawel Jakub Dawidek 9102d1661a5SPawel Jakub Dawidek /* 9112d1661a5SPawel Jakub Dawidek * Treat bio_driver1 field in parent bio as list head and field bio_caller1 9122d1661a5SPawel Jakub Dawidek * in child bio as pointer to the next element on the list. 
9132d1661a5SPawel Jakub Dawidek */ 9142d1661a5SPawel Jakub Dawidek #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1 9152d1661a5SPawel Jakub Dawidek 9162d1661a5SPawel Jakub Dawidek #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1 9172d1661a5SPawel Jakub Dawidek 9182d1661a5SPawel Jakub Dawidek #define G_RAID3_FOREACH_BIO(pbp, bp) \ 9192d1661a5SPawel Jakub Dawidek for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \ 9202d1661a5SPawel Jakub Dawidek (bp) = G_RAID3_NEXT_BIO(bp)) 9212d1661a5SPawel Jakub Dawidek 9222d1661a5SPawel Jakub Dawidek #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \ 9232d1661a5SPawel Jakub Dawidek for ((bp) = G_RAID3_HEAD_BIO(pbp); \ 9242d1661a5SPawel Jakub Dawidek (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \ 9252d1661a5SPawel Jakub Dawidek (bp) = (tmpbp)) 9262d1661a5SPawel Jakub Dawidek 9272d1661a5SPawel Jakub Dawidek static void 9282d1661a5SPawel Jakub Dawidek g_raid3_init_bio(struct bio *pbp) 9292d1661a5SPawel Jakub Dawidek { 9302d1661a5SPawel Jakub Dawidek 9312d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = NULL; 9322d1661a5SPawel Jakub Dawidek } 9332d1661a5SPawel Jakub Dawidek 9342d1661a5SPawel Jakub Dawidek static void 935dba915cfSPawel Jakub Dawidek g_raid3_remove_bio(struct bio *cbp) 936dba915cfSPawel Jakub Dawidek { 937dba915cfSPawel Jakub Dawidek struct bio *pbp, *bp; 938dba915cfSPawel Jakub Dawidek 939dba915cfSPawel Jakub Dawidek pbp = cbp->bio_parent; 940dba915cfSPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == cbp) 941dba915cfSPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 942dba915cfSPawel Jakub Dawidek else { 943dba915cfSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 944dba915cfSPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == cbp) { 945dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 946dba915cfSPawel Jakub Dawidek break; 947dba915cfSPawel Jakub Dawidek } 948dba915cfSPawel Jakub Dawidek } 949dba915cfSPawel Jakub Dawidek } 950dba915cfSPawel Jakub Dawidek 
G_RAID3_NEXT_BIO(cbp) = NULL; 951dba915cfSPawel Jakub Dawidek } 952dba915cfSPawel Jakub Dawidek 953dba915cfSPawel Jakub Dawidek static void 954dba915cfSPawel Jakub Dawidek g_raid3_replace_bio(struct bio *sbp, struct bio *dbp) 955dba915cfSPawel Jakub Dawidek { 956dba915cfSPawel Jakub Dawidek struct bio *pbp, *bp; 957dba915cfSPawel Jakub Dawidek 958dba915cfSPawel Jakub Dawidek g_raid3_remove_bio(sbp); 959dba915cfSPawel Jakub Dawidek pbp = dbp->bio_parent; 960dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp); 961dba915cfSPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == dbp) 962dba915cfSPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = sbp; 963dba915cfSPawel Jakub Dawidek else { 964dba915cfSPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 965dba915cfSPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == dbp) { 966dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = sbp; 967dba915cfSPawel Jakub Dawidek break; 968dba915cfSPawel Jakub Dawidek } 969dba915cfSPawel Jakub Dawidek } 970dba915cfSPawel Jakub Dawidek } 971dba915cfSPawel Jakub Dawidek G_RAID3_NEXT_BIO(dbp) = NULL; 972dba915cfSPawel Jakub Dawidek } 973dba915cfSPawel Jakub Dawidek 974dba915cfSPawel Jakub Dawidek static void 9752d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp) 9762d1661a5SPawel Jakub Dawidek { 9772d1661a5SPawel Jakub Dawidek struct bio *bp, *pbp; 9782d1661a5SPawel Jakub Dawidek size_t size; 9792d1661a5SPawel Jakub Dawidek 9802d1661a5SPawel Jakub Dawidek pbp = cbp->bio_parent; 9812d1661a5SPawel Jakub Dawidek pbp->bio_children--; 9822d1661a5SPawel Jakub Dawidek KASSERT(cbp->bio_data != NULL, ("NULL bio_data")); 9832d1661a5SPawel Jakub Dawidek size = pbp->bio_length / (sc->sc_ndisks - 1); 984ed940a82SPawel Jakub Dawidek g_raid3_free(sc, cbp->bio_data, size); 9852d1661a5SPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == cbp) { 9862d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 9872d1661a5SPawel Jakub Dawidek 
G_RAID3_NEXT_BIO(cbp) = NULL; 9882d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 9892d1661a5SPawel Jakub Dawidek } else { 9902d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 9912d1661a5SPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == cbp) 9922d1661a5SPawel Jakub Dawidek break; 9932d1661a5SPawel Jakub Dawidek } 994dba915cfSPawel Jakub Dawidek if (bp != NULL) { 995dba915cfSPawel Jakub Dawidek KASSERT(G_RAID3_NEXT_BIO(bp) != NULL, 996dba915cfSPawel Jakub Dawidek ("NULL bp->bio_driver1")); 9972d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 9982d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 999dba915cfSPawel Jakub Dawidek } 10002d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 10012d1661a5SPawel Jakub Dawidek } 10022d1661a5SPawel Jakub Dawidek } 10032d1661a5SPawel Jakub Dawidek 10042d1661a5SPawel Jakub Dawidek static struct bio * 10052d1661a5SPawel Jakub Dawidek g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp) 10062d1661a5SPawel Jakub Dawidek { 10072d1661a5SPawel Jakub Dawidek struct bio *bp, *cbp; 10082d1661a5SPawel Jakub Dawidek size_t size; 10093650be51SPawel Jakub Dawidek int memflag; 10102d1661a5SPawel Jakub Dawidek 10112d1661a5SPawel Jakub Dawidek cbp = g_clone_bio(pbp); 10122d1661a5SPawel Jakub Dawidek if (cbp == NULL) 10132d1661a5SPawel Jakub Dawidek return (NULL); 10142d1661a5SPawel Jakub Dawidek size = pbp->bio_length / (sc->sc_ndisks - 1); 10153650be51SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 10163650be51SPawel Jakub Dawidek memflag = M_WAITOK; 10172d1661a5SPawel Jakub Dawidek else 10183650be51SPawel Jakub Dawidek memflag = M_NOWAIT; 1019ed940a82SPawel Jakub Dawidek cbp->bio_data = g_raid3_alloc(sc, size, memflag); 10203650be51SPawel Jakub Dawidek if (cbp->bio_data == NULL) { 10212d1661a5SPawel Jakub Dawidek pbp->bio_children--; 10222d1661a5SPawel Jakub Dawidek g_destroy_bio(cbp); 10232d1661a5SPawel Jakub Dawidek return (NULL); 10242d1661a5SPawel Jakub Dawidek } 
10252d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(cbp) = NULL; 10262d1661a5SPawel Jakub Dawidek if (G_RAID3_HEAD_BIO(pbp) == NULL) 10272d1661a5SPawel Jakub Dawidek G_RAID3_HEAD_BIO(pbp) = cbp; 10282d1661a5SPawel Jakub Dawidek else { 10292d1661a5SPawel Jakub Dawidek G_RAID3_FOREACH_BIO(pbp, bp) { 10302d1661a5SPawel Jakub Dawidek if (G_RAID3_NEXT_BIO(bp) == NULL) { 10312d1661a5SPawel Jakub Dawidek G_RAID3_NEXT_BIO(bp) = cbp; 10322d1661a5SPawel Jakub Dawidek break; 10332d1661a5SPawel Jakub Dawidek } 10342d1661a5SPawel Jakub Dawidek } 10352d1661a5SPawel Jakub Dawidek } 10362d1661a5SPawel Jakub Dawidek return (cbp); 10372d1661a5SPawel Jakub Dawidek } 10382d1661a5SPawel Jakub Dawidek 10392d1661a5SPawel Jakub Dawidek static void 10402d1661a5SPawel Jakub Dawidek g_raid3_scatter(struct bio *pbp) 10412d1661a5SPawel Jakub Dawidek { 10422d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 10432d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 1044ee40c7aaSPawel Jakub Dawidek struct bio *bp, *cbp, *tmpbp; 10452d1661a5SPawel Jakub Dawidek off_t atom, cadd, padd, left; 104606b215fdSAlexander Motin int first; 10472d1661a5SPawel Jakub Dawidek 10482d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 10492d1661a5SPawel Jakub Dawidek bp = NULL; 10502d1661a5SPawel Jakub Dawidek if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 10512d1661a5SPawel Jakub Dawidek /* 10522d1661a5SPawel Jakub Dawidek * Find bio for which we should calculate data. 
	 */
	G_RAID3_FOREACH_BIO(pbp, cbp) {
		if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
			bp = cbp;
			break;
		}
	}
	KASSERT(bp != NULL, ("NULL parity bio."));
	}
	/*
	 * Interleave the parent's data into the per-component child
	 * buffers: each parent sector is split into (ndisks - 1) atoms,
	 * one per data component.  The parity child 'bp' is skipped here;
	 * its contents are computed below.
	 */
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if (cbp == bp)
				continue;
			bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
			padd += atom;
		}
		cadd += atom;
	}
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Calculate parity: XOR of all data components.  The first
		 * component seeds bp->bio_data with bcopy(), the rest are
		 * folded in with g_raid3_xor().
		 */
		first = 1;
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			if (cbp == bp)
				continue;
			if (first) {
				bcopy(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
				first = 0;
			} else {
				g_raid3_xor(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
			}
			/*
			 * A child with no backing disk only contributed its
			 * data to the parity computation; it is not sent out.
			 */
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
				g_raid3_destroy_bio(sc, cbp);
		}
	}
	/* Dispatch every remaining child bio to its component's consumer. */
	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
		struct g_consumer *cp;

		disk = cbp->bio_caller2;
		cp = disk->d_consumer;
		cbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		sc->sc_writes++;
		g_io_request(cbp, cp);
	}
}

/*
 * Completion path for a striped READ: check every child bio for errors
 * (at most one failed component is tolerated and re-issued to the parity
 * disk), rebuild a missing/parity column by XOR if needed, then
 * de-interleave the child buffers back into the parent bio and deliver it.
 */
static void
g_raid3_gather(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *xbp, *fbp, *cbp;	/* xbp: parity bio, fbp: failed bio */
	off_t atom, cadd, padd, left;

	sc = pbp->bio_to->geom->softc;
	/*
	 * Find bio for which we have to calculate data.
	 * While going through this path, check if all requests
	 * succeeded, if not, deny whole request.
	 * If we're in COMPLETE mode, we allow one request to fail,
	 * so if we find one, we're sending it to the parity consumer.
	 * If there are more failed requests, we deny whole request.
	 */
	xbp = fbp = NULL;
	G_RAID3_FOREACH_BIO(pbp, cbp) {
		if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
			KASSERT(xbp == NULL, ("More than one parity bio."));
			xbp = cbp;
		}
		if (cbp->bio_error == 0)
			continue;
		/*
		 * Found failed request.
		 */
		if (fbp == NULL) {
			if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
				/*
				 * We are already in degraded mode, so we can't
				 * accept any failures.
				 */
				if (pbp->bio_error == 0)
					pbp->bio_error = cbp->bio_error;
			} else {
				fbp = cbp;
			}
		} else {
			/*
			 * Next failed request, that's too many.
			 */
			if (pbp->bio_error == 0)
				pbp->bio_error = fbp->bio_error;
		}
		disk = cbp->bio_caller2;
		if (disk == NULL)
			continue;
		/* First failure on this disk logs loudly; repeats quietly. */
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
			G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		} else {
			G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	if (pbp->bio_error != 0)
		goto finish;
	if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
		/*
		 * In VERIFY mode the parity was read anyway; substitute it
		 * for the failed component instead of re-reading.
		 */
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
		if (xbp != fbp)
			g_raid3_replace_bio(xbp, fbp);
		g_raid3_destroy_bio(sc, fbp);
	} else if (fbp != NULL) {
		struct g_consumer *cp;

		/*
		 * One request failed, so send the same request to
		 * the parity consumer.
		 */
		disk = pbp->bio_driver2;
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
			pbp->bio_error = fbp->bio_error;
			goto finish;
		}
		pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
		/* Re-arm the failed child bio for one more round trip. */
		pbp->bio_inbed--;
		fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
		if (disk->d_no == sc->sc_ndisks - 1)
			fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
		fbp->bio_error = 0;
		fbp->bio_completed = 0;
		fbp->bio_children = 0;
		fbp->bio_inbed = 0;
		cp = disk->d_consumer;
		fbp->bio_caller2 = disk;
		fbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(fbp, cp);
		return;
	}
	if (xbp != NULL) {
		/*
		 * Calculate parity: XOR all data components into the parity
		 * buffer; the result reconstructs the missing column (or, in
		 * VERIFY mode, must come out all-zero).
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
				continue;
			g_raid3_xor(cbp->bio_data, xbp->bio_data,
			    xbp->bio_length);
		}
		xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
			if (!g_raid3_is_zero(xbp)) {
				g_raid3_parity_mismatch++;
				pbp->bio_error = EIO;
				goto finish;
			}
			g_raid3_destroy_bio(sc, xbp);
		}
	}
	/*
	 * De-interleave: gather one atom per component into each parent
	 * sector, mirroring the scatter layout above.
	 */
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
			pbp->bio_completed += atom;
			padd += atom;
		}
		cadd += atom;
	}
finish:
	if (pbp->bio_error == 0)
		G_RAID3_LOGREQ(3, pbp, "Request finished.");
	else {
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
			G_RAID3_LOGREQ(1, pbp, "Verification error.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
	}
	pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
	while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
		g_raid3_destroy_bio(sc, cbp);
	g_io_deliver(pbp, pbp->bio_error);
}

/*
 * Completion callback for regular (non-sync) child bios: mark the bio,
 * queue it for the worker thread and wake the worker up.  Runs in
 * g_down/interrupt context, hence only the queue mutex is taken.
 */
static void
g_raid3_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
	G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

/*
 * Worker-side handling of one completed regular child bio.  Accounts the
 * completion against the parent; once all children are in, either gathers
 * a READ or finalizes a WRITE/DELETE and delivers the parent.
 */
static void
g_raid3_regular_request(struct bio *cbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = cbp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	cbp->bio_from->index--;
	if (cbp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = cbp->bio_from->private;
	if (disk == NULL) {
		/* Consumer was orphaned; release it, but keep accounting. */
		g_topology_lock();
		g_raid3_kill_consumer(sc, cbp->bio_from);
		g_topology_unlock();
	}

	G_RAID3_LOGREQ(3, cbp, "Request finished.");
	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	/* Wait until every child has reported back. */
	if (pbp->bio_inbed != pbp->bio_children)
		return;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_raid3_gather(pbp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		int error = 0;

		pbp->bio_completed = pbp->bio_length;
		/*
		 * Destroy all children; the first failure is remembered in
		 * 'error', a second failure makes the whole request fail.
		 */
		while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
			if (cbp->bio_error == 0) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if (error == 0)
				error = cbp->bio_error;
			else if (pbp->bio_error == 0) {
				/*
				 * Next failed request, that's too many.
				 */
				pbp->bio_error = error;
			}

			disk = cbp->bio_caller2;
			if (disk == NULL) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			/* First failure per disk logs at level 0. */
			if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
				G_RAID3_LOGREQ(0, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			} else {
				G_RAID3_LOGREQ(1, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			}
			if (g_raid3_disconnect_on_failure &&
			    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
				sc->sc_bump_id |= G_RAID3_BUMP_GENID;
				g_raid3_event_send(disk,
				    G_RAID3_DISK_STATE_DISCONNECTED,
				    G_RAID3_EVENT_DONTWAIT);
			}
			g_raid3_destroy_bio(sc, cbp);
		}
		if (pbp->bio_error == 0)
			G_RAID3_LOGREQ(3, pbp, "Request finished.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_raid3_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	    }
	}
}

/*
 * Completion callback for synchronization bios: mark the bio as a sync
 * request, queue it for the worker thread and wake the worker up.
 */
static void
g_raid3_sync_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

/*
 * Handle BIO_FLUSH: clone the request to every ACTIVE disk.  Cloning is
 * all-or-nothing — on ENOMEM all clones made so far are destroyed and the
 * original request is failed.
 */
static void
g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	u_int i;
	bioq_init(&queue);
	/* Pass 1: clone the flush for every active component. */
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			/* Out of memory: undo all clones and fail. */
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	/* Pass 2: all clones allocated, send them out. */
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

/*
 * GEOM start routine: entry point for all I/O arriving on the RAID3
 * provider.  READ/WRITE/DELETE are queued for the worker thread,
 * BIO_FLUSH is fanned out immediately, everything else is rejected.
 */
static void
g_raid3_start(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid3_start() should not be called at all.
	 */
	KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
	    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_RAID3_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_raid3_flush(sc, bp);
		return;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
	struct g_raid3_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	disk = sc->sc_syncdisk;
	if (disk == NULL)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	for (i = 0; i < g_raid3_syncreqs; i++) {
		sbp = disk->d_sync.ds_bios[i];
		if (sbp == NULL)
			continue;
		sstart = sbp->bio_offset;
		send = sbp->bio_length;
		if (sbp->bio_cmd == BIO_WRITE) {
			/*
			 * Sync WRITEs carry per-component offsets/lengths;
			 * scale by (ndisks - 1) to compare them in the
			 * provider's address space.
			 */
			sstart *= sc->sc_ndisks - 1;
			send *= sc->sc_ndisks - 1;
		}
		send += sstart;
		/* Standard interval-overlap test. */
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_syncdisk == NULL)
		return (0);
	sstart = sbp->bio_offset;
	send = sstart + sbp->bio_length;
	/* Interval-overlap test against every in-flight regular request. */
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts request onto delayed queue.
 */
static void
g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts synchronization request onto delayed queue.
 */
static void
g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_raid3_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_raid3_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_RAID3_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider and then a WRITE request (with
 * the read data) to the provider being synchronized.  When the WRITE is
 * finished, a new synchronization request is sent.
 */
static void
g_raid3_sync_request(struct bio *bp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Disk is gone; tear down the orphaned sync consumer. */
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_raid3_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_RAID3);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;
		u_char *dst, *src;
		off_t left;
		u_int atom;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		/*
		 * Extract (in place) this component's column from the data
		 * just read from the provider.
		 */
		atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
		dst = src = bp->bio_data;
		if (disk->d_no == sc->sc_ndisks - 1) {
			u_int n;

			/* Parity component: XOR of all data atoms. */
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += atom;
				for (n = 1; n < sc->sc_ndisks - 1; n++) {
					g_raid3_xor(src, dst, atom);
					src += atom;
				}
				dst += atom;
			}
		} else {
			/* Regular component: pick its atom per sector. */
			src += atom * disk->d_no;
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += sc->sc_sectorsize;
				dst += atom;
			}
		}
		/* Reuse the same bio as a WRITE to the syncing disk. */
		bp->bio_driver1 = bp->bio_driver2 = NULL;
		bp->bio_pflags = 0;
		bp->bio_offset /= sc->sc_ndisks - 1;
		bp->bio_length /= sc->sc_ndisks - 1;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		bp->bio_children = bp->bio_inbed = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_raid3_disk_sync *sync;
		off_t boffset, moffset;	/* used by the offset scan below */
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				/* bio_caller1 holds this bio's slot index. */
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_RAID3);
			g_destroy_bio(bp);
			/* Wait for the remaining in-flight sync bios. */
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/*
			 * Disk up-to-date, activate it.
			 */
			g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		g_reset_bio(bp);
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
		bp->bio_done = g_raid3_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_raid3_regular_collision(sc, bp))
			g_raid3_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_raid3_regular_release(sc);

		/* Find the smallest offset.
*/ 17433650be51SPawel Jakub Dawidek moffset = sc->sc_mediasize; 17443650be51SPawel Jakub Dawidek for (i = 0; i < g_raid3_syncreqs; i++) { 17453650be51SPawel Jakub Dawidek bp = sync->ds_bios[i]; 17463650be51SPawel Jakub Dawidek boffset = bp->bio_offset; 17473650be51SPawel Jakub Dawidek if (bp->bio_cmd == BIO_WRITE) 17483650be51SPawel Jakub Dawidek boffset *= sc->sc_ndisks - 1; 17493650be51SPawel Jakub Dawidek if (boffset < moffset) 17503650be51SPawel Jakub Dawidek moffset = boffset; 17513650be51SPawel Jakub Dawidek } 17523650be51SPawel Jakub Dawidek if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) { 17533650be51SPawel Jakub Dawidek /* Update offset_done on every 100 blocks. */ 17543650be51SPawel Jakub Dawidek sync->ds_offset_done = moffset; 17552d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 17562d1661a5SPawel Jakub Dawidek } 17572d1661a5SPawel Jakub Dawidek return; 1758d2fb9c62SPawel Jakub Dawidek } 17592d1661a5SPawel Jakub Dawidek default: 17602d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 17612d1661a5SPawel Jakub Dawidek bp->bio_cmd, sc->sc_name)); 17622d1661a5SPawel Jakub Dawidek break; 17632d1661a5SPawel Jakub Dawidek } 17642d1661a5SPawel Jakub Dawidek } 17652d1661a5SPawel Jakub Dawidek 17662d1661a5SPawel Jakub Dawidek static int 17672d1661a5SPawel Jakub Dawidek g_raid3_register_request(struct bio *pbp) 17682d1661a5SPawel Jakub Dawidek { 17692d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 17702d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 17712d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 1772ee40c7aaSPawel Jakub Dawidek struct bio *cbp, *tmpbp; 17732d1661a5SPawel Jakub Dawidek off_t offset, length; 1774fa6a7837SDavid E. O'Brien u_int n, ndisks; 1775dba915cfSPawel Jakub Dawidek int round_robin, verify; 17762d1661a5SPawel Jakub Dawidek 1777fa6a7837SDavid E. 
O'Brien ndisks = 0; 17782d1661a5SPawel Jakub Dawidek sc = pbp->bio_to->geom->softc; 17792d1661a5SPawel Jakub Dawidek if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 && 17802d1661a5SPawel Jakub Dawidek sc->sc_syncdisk == NULL) { 17812d1661a5SPawel Jakub Dawidek g_io_deliver(pbp, EIO); 17822d1661a5SPawel Jakub Dawidek return (0); 17832d1661a5SPawel Jakub Dawidek } 17842d1661a5SPawel Jakub Dawidek g_raid3_init_bio(pbp); 17852d1661a5SPawel Jakub Dawidek length = pbp->bio_length / (sc->sc_ndisks - 1); 17862d1661a5SPawel Jakub Dawidek offset = pbp->bio_offset / (sc->sc_ndisks - 1); 1787dba915cfSPawel Jakub Dawidek round_robin = verify = 0; 17882d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 17892d1661a5SPawel Jakub Dawidek case BIO_READ: 1790dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 1791dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1792dba915cfSPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY; 1793dba915cfSPawel Jakub Dawidek verify = 1; 1794dba915cfSPawel Jakub Dawidek ndisks = sc->sc_ndisks; 1795dba915cfSPawel Jakub Dawidek } else { 1796dba915cfSPawel Jakub Dawidek verify = 0; 17972d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks - 1; 1798dba915cfSPawel Jakub Dawidek } 1799dba915cfSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 && 1800dba915cfSPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1801dba915cfSPawel Jakub Dawidek round_robin = 1; 1802dba915cfSPawel Jakub Dawidek } else { 1803dba915cfSPawel Jakub Dawidek round_robin = 0; 1804dba915cfSPawel Jakub Dawidek } 1805dba915cfSPawel Jakub Dawidek KASSERT(!round_robin || !verify, 1806dba915cfSPawel Jakub Dawidek ("ROUND-ROBIN and VERIFY are mutually exclusive.")); 1807f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1]; 18082d1661a5SPawel Jakub Dawidek break; 18092d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18102d1661a5SPawel Jakub 
Dawidek case BIO_DELETE: 18113650be51SPawel Jakub Dawidek /* 18123650be51SPawel Jakub Dawidek * Delay the request if it is colliding with a synchronization 18133650be51SPawel Jakub Dawidek * request. 18143650be51SPawel Jakub Dawidek */ 18153650be51SPawel Jakub Dawidek if (g_raid3_sync_collision(sc, pbp)) { 18163650be51SPawel Jakub Dawidek g_raid3_regular_delay(sc, pbp); 18173650be51SPawel Jakub Dawidek return (0); 18183650be51SPawel Jakub Dawidek } 1819d2fb9c62SPawel Jakub Dawidek 18204d006a98SPawel Jakub Dawidek if (sc->sc_idle) 18214d006a98SPawel Jakub Dawidek g_raid3_unidle(sc); 18220962f942SPawel Jakub Dawidek else 182301f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 18244d006a98SPawel Jakub Dawidek 18252d1661a5SPawel Jakub Dawidek ndisks = sc->sc_ndisks; 18262d1661a5SPawel Jakub Dawidek break; 18272d1661a5SPawel Jakub Dawidek } 18282d1661a5SPawel Jakub Dawidek for (n = 0; n < ndisks; n++) { 18292d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 18302d1661a5SPawel Jakub Dawidek cbp = g_raid3_clone_bio(sc, pbp); 18312d1661a5SPawel Jakub Dawidek if (cbp == NULL) { 18322d1661a5SPawel Jakub Dawidek while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 18332d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 1834a65a0da2SPawel Jakub Dawidek /* 1835a65a0da2SPawel Jakub Dawidek * To prevent deadlock, we must run back up 1836a65a0da2SPawel Jakub Dawidek * with the ENOMEM for failed requests of any 1837a65a0da2SPawel Jakub Dawidek * of our consumers. Our own sync requests 1838a65a0da2SPawel Jakub Dawidek * can stick around, as they are finite. 
1839a65a0da2SPawel Jakub Dawidek */ 1840a65a0da2SPawel Jakub Dawidek if ((pbp->bio_cflags & 1841a65a0da2SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_REGULAR) != 0) { 1842a65a0da2SPawel Jakub Dawidek g_io_deliver(pbp, ENOMEM); 1843a65a0da2SPawel Jakub Dawidek return (0); 1844a65a0da2SPawel Jakub Dawidek } 18452d1661a5SPawel Jakub Dawidek return (ENOMEM); 18462d1661a5SPawel Jakub Dawidek } 18472d1661a5SPawel Jakub Dawidek cbp->bio_offset = offset; 18482d1661a5SPawel Jakub Dawidek cbp->bio_length = length; 18492d1661a5SPawel Jakub Dawidek cbp->bio_done = g_raid3_done; 18502d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 18512d1661a5SPawel Jakub Dawidek case BIO_READ: 18522d1661a5SPawel Jakub Dawidek if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 18532d1661a5SPawel Jakub Dawidek /* 18542d1661a5SPawel Jakub Dawidek * Replace invalid component with the parity 18552d1661a5SPawel Jakub Dawidek * component. 18562d1661a5SPawel Jakub Dawidek */ 18572d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 18582d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18592d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1860f5a2f7feSPawel Jakub Dawidek } else if (round_robin && 1861f5a2f7feSPawel Jakub Dawidek disk->d_no == sc->sc_round_robin) { 1862f5a2f7feSPawel Jakub Dawidek /* 1863f5a2f7feSPawel Jakub Dawidek * In round-robin mode skip one data component 1864f5a2f7feSPawel Jakub Dawidek * and use parity component when reading. 
1865f5a2f7feSPawel Jakub Dawidek */ 1866f5a2f7feSPawel Jakub Dawidek pbp->bio_driver2 = disk; 1867f5a2f7feSPawel Jakub Dawidek disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1868f5a2f7feSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1869f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin++; 1870f5a2f7feSPawel Jakub Dawidek round_robin = 0; 1871dba915cfSPawel Jakub Dawidek } else if (verify && disk->d_no == sc->sc_ndisks - 1) { 1872dba915cfSPawel Jakub Dawidek cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 18732d1661a5SPawel Jakub Dawidek } 18742d1661a5SPawel Jakub Dawidek break; 18752d1661a5SPawel Jakub Dawidek case BIO_WRITE: 18762d1661a5SPawel Jakub Dawidek case BIO_DELETE: 18772d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 18782d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 18792d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18802d1661a5SPawel Jakub Dawidek /* 18812d1661a5SPawel Jakub Dawidek * Active parity component, mark it as such. 18822d1661a5SPawel Jakub Dawidek */ 18832d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 18842d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_PARITY; 18852d1661a5SPawel Jakub Dawidek } 18862d1661a5SPawel Jakub Dawidek } else { 18872d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 18882d1661a5SPawel Jakub Dawidek if (n == ndisks - 1) { 18892d1661a5SPawel Jakub Dawidek /* 18902d1661a5SPawel Jakub Dawidek * Parity component is not connected, 18912d1661a5SPawel Jakub Dawidek * so destroy its request. 
18922d1661a5SPawel Jakub Dawidek */ 18932d1661a5SPawel Jakub Dawidek pbp->bio_pflags |= 18942d1661a5SPawel Jakub Dawidek G_RAID3_BIO_PFLAG_NOPARITY; 18952d1661a5SPawel Jakub Dawidek g_raid3_destroy_bio(sc, cbp); 18962d1661a5SPawel Jakub Dawidek cbp = NULL; 18972d1661a5SPawel Jakub Dawidek } else { 18982d1661a5SPawel Jakub Dawidek cbp->bio_cflags |= 18992d1661a5SPawel Jakub Dawidek G_RAID3_BIO_CFLAG_NODISK; 19002d1661a5SPawel Jakub Dawidek disk = NULL; 19012d1661a5SPawel Jakub Dawidek } 19022d1661a5SPawel Jakub Dawidek } 19032d1661a5SPawel Jakub Dawidek break; 19042d1661a5SPawel Jakub Dawidek } 19052d1661a5SPawel Jakub Dawidek if (cbp != NULL) 19062d1661a5SPawel Jakub Dawidek cbp->bio_caller2 = disk; 19072d1661a5SPawel Jakub Dawidek } 19082d1661a5SPawel Jakub Dawidek switch (pbp->bio_cmd) { 19092d1661a5SPawel Jakub Dawidek case BIO_READ: 1910f5a2f7feSPawel Jakub Dawidek if (round_robin) { 1911f5a2f7feSPawel Jakub Dawidek /* 1912f5a2f7feSPawel Jakub Dawidek * If we are in round-robin mode and 'round_robin' is 1913f5a2f7feSPawel Jakub Dawidek * still 1, it means, that we skipped parity component 1914f5a2f7feSPawel Jakub Dawidek * for this read and must reset sc_round_robin field. 
1915f5a2f7feSPawel Jakub Dawidek */ 1916f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 1917f5a2f7feSPawel Jakub Dawidek } 1918ee40c7aaSPawel Jakub Dawidek G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 19192d1661a5SPawel Jakub Dawidek disk = cbp->bio_caller2; 19202d1661a5SPawel Jakub Dawidek cp = disk->d_consumer; 19212d1661a5SPawel Jakub Dawidek cbp->bio_to = cp->provider; 19222d1661a5SPawel Jakub Dawidek G_RAID3_LOGREQ(3, cbp, "Sending request."); 19233650be51SPawel Jakub Dawidek KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 19242d1661a5SPawel Jakub Dawidek ("Consumer %s not opened (r%dw%de%d).", 19252d1661a5SPawel Jakub Dawidek cp->provider->name, cp->acr, cp->acw, cp->ace)); 192679e61493SPawel Jakub Dawidek cp->index++; 19272d1661a5SPawel Jakub Dawidek g_io_request(cbp, cp); 19282d1661a5SPawel Jakub Dawidek } 19292d1661a5SPawel Jakub Dawidek break; 19302d1661a5SPawel Jakub Dawidek case BIO_WRITE: 19312d1661a5SPawel Jakub Dawidek case BIO_DELETE: 19322d1661a5SPawel Jakub Dawidek /* 19333650be51SPawel Jakub Dawidek * Put request onto inflight queue, so we can check if new 19343650be51SPawel Jakub Dawidek * synchronization requests don't collide with it. 19353650be51SPawel Jakub Dawidek */ 19363650be51SPawel Jakub Dawidek bioq_insert_tail(&sc->sc_inflight, pbp); 19373650be51SPawel Jakub Dawidek 19383650be51SPawel Jakub Dawidek /* 19392d1661a5SPawel Jakub Dawidek * Bump syncid on first write. 
19402d1661a5SPawel Jakub Dawidek */ 1941ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) { 1942a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 1943d97d5ee9SPawel Jakub Dawidek g_raid3_bump_syncid(sc); 19442d1661a5SPawel Jakub Dawidek } 19452d1661a5SPawel Jakub Dawidek g_raid3_scatter(pbp); 19462d1661a5SPawel Jakub Dawidek break; 19472d1661a5SPawel Jakub Dawidek } 19482d1661a5SPawel Jakub Dawidek return (0); 19492d1661a5SPawel Jakub Dawidek } 19502d1661a5SPawel Jakub Dawidek 19512d1661a5SPawel Jakub Dawidek static int 19522d1661a5SPawel Jakub Dawidek g_raid3_can_destroy(struct g_raid3_softc *sc) 19532d1661a5SPawel Jakub Dawidek { 19542d1661a5SPawel Jakub Dawidek struct g_geom *gp; 19552d1661a5SPawel Jakub Dawidek struct g_consumer *cp; 19562d1661a5SPawel Jakub Dawidek 19572d1661a5SPawel Jakub Dawidek g_topology_assert(); 19582d1661a5SPawel Jakub Dawidek gp = sc->sc_geom; 195918486a5eSPawel Jakub Dawidek if (gp->softc == NULL) 196018486a5eSPawel Jakub Dawidek return (1); 19612d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19622d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19632d1661a5SPawel Jakub Dawidek return (0); 19642d1661a5SPawel Jakub Dawidek } 19652d1661a5SPawel Jakub Dawidek gp = sc->sc_sync.ds_geom; 19662d1661a5SPawel Jakub Dawidek LIST_FOREACH(cp, &gp->consumer, consumer) { 19672d1661a5SPawel Jakub Dawidek if (g_raid3_is_busy(sc, cp)) 19682d1661a5SPawel Jakub Dawidek return (0); 19692d1661a5SPawel Jakub Dawidek } 19702d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 19712d1661a5SPawel Jakub Dawidek sc->sc_name); 19722d1661a5SPawel Jakub Dawidek return (1); 19732d1661a5SPawel Jakub Dawidek } 19742d1661a5SPawel Jakub Dawidek 19752d1661a5SPawel Jakub Dawidek static int 19762d1661a5SPawel Jakub Dawidek g_raid3_try_destroy(struct g_raid3_softc *sc) 19772d1661a5SPawel Jakub Dawidek { 19782d1661a5SPawel Jakub Dawidek 19793650be51SPawel 
Jakub Dawidek g_topology_assert_not(); 19803650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 19813650be51SPawel Jakub Dawidek 19824ed854e8SPawel Jakub Dawidek if (sc->sc_rootmount != NULL) { 19834ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 19844ed854e8SPawel Jakub Dawidek sc->sc_rootmount); 19854ed854e8SPawel Jakub Dawidek root_mount_rel(sc->sc_rootmount); 19864ed854e8SPawel Jakub Dawidek sc->sc_rootmount = NULL; 19874ed854e8SPawel Jakub Dawidek } 19884ed854e8SPawel Jakub Dawidek 19892d1661a5SPawel Jakub Dawidek g_topology_lock(); 19902d1661a5SPawel Jakub Dawidek if (!g_raid3_can_destroy(sc)) { 19912d1661a5SPawel Jakub Dawidek g_topology_unlock(); 19922d1661a5SPawel Jakub Dawidek return (0); 19932d1661a5SPawel Jakub Dawidek } 199418486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 199518486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 1996a245a548SPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) { 19972d1661a5SPawel Jakub Dawidek g_topology_unlock(); 19982d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 19992d1661a5SPawel Jakub Dawidek &sc->sc_worker); 20003650be51SPawel Jakub Dawidek /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 20013650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 20022d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_worker); 20032d1661a5SPawel Jakub Dawidek sc->sc_worker = NULL; 20042d1661a5SPawel Jakub Dawidek } else { 20052d1661a5SPawel Jakub Dawidek g_topology_unlock(); 20063650be51SPawel Jakub Dawidek g_raid3_destroy_device(sc); 20072d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 20082d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 20092d1661a5SPawel Jakub Dawidek } 20102d1661a5SPawel Jakub Dawidek return (1); 20112d1661a5SPawel Jakub Dawidek } 20122d1661a5SPawel Jakub Dawidek 20132d1661a5SPawel Jakub Dawidek /* 20142d1661a5SPawel Jakub Dawidek * Worker thread. 
20152d1661a5SPawel Jakub Dawidek */ 20162d1661a5SPawel Jakub Dawidek static void 20172d1661a5SPawel Jakub Dawidek g_raid3_worker(void *arg) 20182d1661a5SPawel Jakub Dawidek { 20192d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 20202d1661a5SPawel Jakub Dawidek struct g_raid3_event *ep; 20212d1661a5SPawel Jakub Dawidek struct bio *bp; 20220962f942SPawel Jakub Dawidek int timeout; 20232d1661a5SPawel Jakub Dawidek 20242d1661a5SPawel Jakub Dawidek sc = arg; 2025982d11f8SJeff Roberson thread_lock(curthread); 202663710c4dSJohn Baldwin sched_prio(curthread, PRIBIO); 2027982d11f8SJeff Roberson thread_unlock(curthread); 20282d1661a5SPawel Jakub Dawidek 20293650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 20302d1661a5SPawel Jakub Dawidek for (;;) { 20312d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: Let's see...", __func__); 20322d1661a5SPawel Jakub Dawidek /* 20332d1661a5SPawel Jakub Dawidek * First take a look at events. 20342d1661a5SPawel Jakub Dawidek * This is important to handle events before any I/O requests. 20352d1661a5SPawel Jakub Dawidek */ 20362d1661a5SPawel Jakub Dawidek ep = g_raid3_event_get(sc); 20373650be51SPawel Jakub Dawidek if (ep != NULL) { 2038d97d5ee9SPawel Jakub Dawidek g_raid3_event_remove(sc, ep); 20392d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) { 20402d1661a5SPawel Jakub Dawidek /* Update only device status. */ 20412d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, 20422d1661a5SPawel Jakub Dawidek "Running event for device %s.", 20432d1661a5SPawel Jakub Dawidek sc->sc_name); 20442d1661a5SPawel Jakub Dawidek ep->e_error = 0; 2045d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 1); 20462d1661a5SPawel Jakub Dawidek } else { 20472d1661a5SPawel Jakub Dawidek /* Update disk status. 
*/ 20482d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Running event for disk %s.", 20492d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(ep->e_disk)); 20502d1661a5SPawel Jakub Dawidek ep->e_error = g_raid3_update_disk(ep->e_disk, 2051d97d5ee9SPawel Jakub Dawidek ep->e_state); 20522d1661a5SPawel Jakub Dawidek if (ep->e_error == 0) 2053d97d5ee9SPawel Jakub Dawidek g_raid3_update_device(sc, 0); 20542d1661a5SPawel Jakub Dawidek } 20552d1661a5SPawel Jakub Dawidek if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) { 20562d1661a5SPawel Jakub Dawidek KASSERT(ep->e_error == 0, 20572d1661a5SPawel Jakub Dawidek ("Error cannot be handled.")); 20582d1661a5SPawel Jakub Dawidek g_raid3_event_free(ep); 20592d1661a5SPawel Jakub Dawidek } else { 20602d1661a5SPawel Jakub Dawidek ep->e_flags |= G_RAID3_EVENT_DONE; 20612d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 20622d1661a5SPawel Jakub Dawidek ep); 20632d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_events_mtx); 20642d1661a5SPawel Jakub Dawidek wakeup(ep); 20652d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_events_mtx); 20662d1661a5SPawel Jakub Dawidek } 20672d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20682d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20693650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20703650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 20713650be51SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 20723745c395SJulian Elischer kproc_exit(0); 20732d1661a5SPawel Jakub Dawidek } 20743650be51SPawel Jakub Dawidek } 20752d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__); 20762d1661a5SPawel Jakub Dawidek continue; 20772d1661a5SPawel Jakub Dawidek } 20782d1661a5SPawel Jakub Dawidek /* 20790962f942SPawel Jakub Dawidek * Check if we can mark array as CLEAN and if we can't take 20800962f942SPawel Jakub Dawidek * how much seconds should we wait. 
20810962f942SPawel Jakub Dawidek */ 20823650be51SPawel Jakub Dawidek timeout = g_raid3_idle(sc, -1); 20830962f942SPawel Jakub Dawidek /* 20842d1661a5SPawel Jakub Dawidek * Now I/O requests. 20852d1661a5SPawel Jakub Dawidek */ 20862d1661a5SPawel Jakub Dawidek /* Get first request from the queue. */ 20872d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 20882d1661a5SPawel Jakub Dawidek bp = bioq_first(&sc->sc_queue); 20892d1661a5SPawel Jakub Dawidek if (bp == NULL) { 20902d1661a5SPawel Jakub Dawidek if ((sc->sc_flags & 20912d1661a5SPawel Jakub Dawidek G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 20922d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 20933650be51SPawel Jakub Dawidek if (g_raid3_try_destroy(sc)) { 20943650be51SPawel Jakub Dawidek curthread->td_pflags &= ~TDP_GEOM; 2095d7fad9f6SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Thread exiting."); 20963745c395SJulian Elischer kproc_exit(0); 20973650be51SPawel Jakub Dawidek } 20982d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 20992d1661a5SPawel Jakub Dawidek } 21003650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 2101a2fe5c66SPawel Jakub Dawidek /* 2102a2fe5c66SPawel Jakub Dawidek * XXX: We can miss an event here, because an event 2103a2fe5c66SPawel Jakub Dawidek * can be added without sx-device-lock and without 2104a2fe5c66SPawel Jakub Dawidek * mtx-queue-lock. Maybe I should just stop using 2105a2fe5c66SPawel Jakub Dawidek * dedicated mutex for events synchronization and 2106a2fe5c66SPawel Jakub Dawidek * stick with the queue lock? 2107a2fe5c66SPawel Jakub Dawidek * The event will hang here until next I/O request 2108a2fe5c66SPawel Jakub Dawidek * or next event is received. 
2109a2fe5c66SPawel Jakub Dawidek */ 21100962f942SPawel Jakub Dawidek MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 21110962f942SPawel Jakub Dawidek timeout * hz); 21123650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21139bb09163SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 21142d1661a5SPawel Jakub Dawidek continue; 21152d1661a5SPawel Jakub Dawidek } 211684edb86dSPawel Jakub Dawidek process: 21172d1661a5SPawel Jakub Dawidek bioq_remove(&sc->sc_queue, bp); 21182d1661a5SPawel Jakub Dawidek mtx_unlock(&sc->sc_queue_mtx); 21192d1661a5SPawel Jakub Dawidek 21208e007c52SPawel Jakub Dawidek if (bp->bio_from->geom == sc->sc_sync.ds_geom && 21218e007c52SPawel Jakub Dawidek (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 21228e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* READ */ 21238e007c52SPawel Jakub Dawidek } else if (bp->bio_to != sc->sc_provider) { 21243650be51SPawel Jakub Dawidek if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 21252d1661a5SPawel Jakub Dawidek g_raid3_regular_request(bp); 21263650be51SPawel Jakub Dawidek else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 21278e007c52SPawel Jakub Dawidek g_raid3_sync_request(bp); /* WRITE */ 2128de6f1c7cSPawel Jakub Dawidek else { 2129de6f1c7cSPawel Jakub Dawidek KASSERT(0, 21309a8fa125SWarner Losh ("Invalid request cflags=0x%hx to=%s.", 2131de6f1c7cSPawel Jakub Dawidek bp->bio_cflags, bp->bio_to->name)); 2132de6f1c7cSPawel Jakub Dawidek } 2133de6f1c7cSPawel Jakub Dawidek } else if (g_raid3_register_request(bp) != 0) { 21342d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 21353650be51SPawel Jakub Dawidek bioq_insert_head(&sc->sc_queue, bp); 213684edb86dSPawel Jakub Dawidek /* 213784edb86dSPawel Jakub Dawidek * We are short in memory, let see if there are finished 213884edb86dSPawel Jakub Dawidek * request we can free. 
213984edb86dSPawel Jakub Dawidek */ 214084edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 214184edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 214284edb86dSPawel Jakub Dawidek goto process; 21432d1661a5SPawel Jakub Dawidek } 214484edb86dSPawel Jakub Dawidek /* 214584edb86dSPawel Jakub Dawidek * No finished regular request, so at least keep 214684edb86dSPawel Jakub Dawidek * synchronization running. 214784edb86dSPawel Jakub Dawidek */ 214884edb86dSPawel Jakub Dawidek TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 214984edb86dSPawel Jakub Dawidek if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 215084edb86dSPawel Jakub Dawidek goto process; 215184edb86dSPawel Jakub Dawidek } 215284edb86dSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 215384edb86dSPawel Jakub Dawidek MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 215484edb86dSPawel Jakub Dawidek "r3:lowmem", hz / 10); 215584edb86dSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 21562d1661a5SPawel Jakub Dawidek } 2157d97d5ee9SPawel Jakub Dawidek G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 21582d1661a5SPawel Jakub Dawidek } 21592d1661a5SPawel Jakub Dawidek } 21602d1661a5SPawel Jakub Dawidek 21612d1661a5SPawel Jakub Dawidek static void 21620962f942SPawel Jakub Dawidek g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 21632d1661a5SPawel Jakub Dawidek { 21642d1661a5SPawel Jakub Dawidek 21653650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_LOCKED); 2166501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2167501250baSPawel Jakub Dawidek return; 21680962f942SPawel Jakub Dawidek if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 21692d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 21703650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21712d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 
21720962f942SPawel Jakub Dawidek } else if (sc->sc_idle && 21730962f942SPawel Jakub Dawidek (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 21742d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 21753650be51SPawel Jakub Dawidek g_raid3_get_diskname(disk), sc->sc_name); 21762d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 21772d1661a5SPawel Jakub Dawidek } 21782d1661a5SPawel Jakub Dawidek } 21792d1661a5SPawel Jakub Dawidek 21802d1661a5SPawel Jakub Dawidek static void 21812d1661a5SPawel Jakub Dawidek g_raid3_sync_start(struct g_raid3_softc *sc) 21822d1661a5SPawel Jakub Dawidek { 21832d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 21843650be51SPawel Jakub Dawidek struct g_consumer *cp; 21853650be51SPawel Jakub Dawidek struct bio *bp; 21862d1661a5SPawel Jakub Dawidek int error; 21872d1661a5SPawel Jakub Dawidek u_int n; 21882d1661a5SPawel Jakub Dawidek 21893650be51SPawel Jakub Dawidek g_topology_assert_not(); 21903650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 21912d1661a5SPawel Jakub Dawidek 21922d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 21932d1661a5SPawel Jakub Dawidek ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 21942d1661a5SPawel Jakub Dawidek sc->sc_state)); 21952d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 21962d1661a5SPawel Jakub Dawidek sc->sc_name, sc->sc_state)); 21972d1661a5SPawel Jakub Dawidek disk = NULL; 21982d1661a5SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 21992d1661a5SPawel Jakub Dawidek if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 22002d1661a5SPawel Jakub Dawidek continue; 22012d1661a5SPawel Jakub Dawidek disk = &sc->sc_disks[n]; 22022d1661a5SPawel Jakub Dawidek break; 22032d1661a5SPawel Jakub Dawidek } 22042d1661a5SPawel Jakub Dawidek if (disk == NULL) 22052d1661a5SPawel Jakub Dawidek return; 22062d1661a5SPawel Jakub Dawidek 
22073650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 22083650be51SPawel Jakub Dawidek g_topology_lock(); 22093650be51SPawel Jakub Dawidek cp = g_new_consumer(sc->sc_sync.ds_geom); 22103650be51SPawel Jakub Dawidek error = g_attach(cp, sc->sc_provider); 22113650be51SPawel Jakub Dawidek KASSERT(error == 0, 22123650be51SPawel Jakub Dawidek ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 22133650be51SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 22143650be51SPawel Jakub Dawidek KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 22153650be51SPawel Jakub Dawidek g_topology_unlock(); 22163650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 22173650be51SPawel Jakub Dawidek 22182d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 22192d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk)); 2220501250baSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 22212d1661a5SPawel Jakub Dawidek disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 22222d1661a5SPawel Jakub Dawidek KASSERT(disk->d_sync.ds_consumer == NULL, 22232d1661a5SPawel Jakub Dawidek ("Sync consumer already exists (device=%s, disk=%s).", 22242d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk))); 22253650be51SPawel Jakub Dawidek 22263650be51SPawel Jakub Dawidek disk->d_sync.ds_consumer = cp; 22272d1661a5SPawel Jakub Dawidek disk->d_sync.ds_consumer->private = disk; 222879e61493SPawel Jakub Dawidek disk->d_sync.ds_consumer->index = 0; 22292d1661a5SPawel Jakub Dawidek sc->sc_syncdisk = disk; 22303650be51SPawel Jakub Dawidek 22313650be51SPawel Jakub Dawidek /* 22323650be51SPawel Jakub Dawidek * Allocate memory for synchronization bios and initialize them. 
22333650be51SPawel Jakub Dawidek */ 22343650be51SPawel Jakub Dawidek disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 22353650be51SPawel Jakub Dawidek M_RAID3, M_WAITOK); 22363650be51SPawel Jakub Dawidek for (n = 0; n < g_raid3_syncreqs; n++) { 22373650be51SPawel Jakub Dawidek bp = g_alloc_bio(); 22383650be51SPawel Jakub Dawidek disk->d_sync.ds_bios[n] = bp; 22393650be51SPawel Jakub Dawidek bp->bio_parent = NULL; 22403650be51SPawel Jakub Dawidek bp->bio_cmd = BIO_READ; 22413650be51SPawel Jakub Dawidek bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK); 22423650be51SPawel Jakub Dawidek bp->bio_cflags = 0; 22433650be51SPawel Jakub Dawidek bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 22443650be51SPawel Jakub Dawidek bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 22453650be51SPawel Jakub Dawidek disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 22463650be51SPawel Jakub Dawidek bp->bio_done = g_raid3_sync_done; 22473650be51SPawel Jakub Dawidek bp->bio_from = disk->d_sync.ds_consumer; 22483650be51SPawel Jakub Dawidek bp->bio_to = sc->sc_provider; 2249ef25813dSRuslan Ermilov bp->bio_caller1 = (void *)(uintptr_t)n; 22503650be51SPawel Jakub Dawidek } 22513650be51SPawel Jakub Dawidek 22523650be51SPawel Jakub Dawidek /* Set the number of in-flight synchronization requests. */ 22533650be51SPawel Jakub Dawidek disk->d_sync.ds_inflight = g_raid3_syncreqs; 22543650be51SPawel Jakub Dawidek 22553650be51SPawel Jakub Dawidek /* 22563650be51SPawel Jakub Dawidek * Fire off first synchronization requests. 
/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
{
	struct g_raid3_disk *disk;
	struct g_consumer *cp;

	/* Must be called without the topology lock, with sc_lock held. */
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
	    ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
	    sc->sc_state));
	/* Detach the syncdisk from the softc before tearing it down. */
	disk = sc->sc_syncdisk;
	sc->sc_syncdisk = NULL;
	KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
	KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
	    g_raid3_disk_state2str(disk->d_state)));
	/* Nothing to stop if synchronization was never started on this disk. */
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_raid3_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_raid3_get_diskname(disk));
	}
	/* Release the array of synchronization bios allocated at sync start. */
	free(disk->d_sync.ds_bios, M_RAID3);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
	/*
	 * Drop sc_lock before taking the topology lock to preserve the
	 * lock order and avoid recursion on sc_lock; reacquire afterwards.
	 */
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_raid3_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}
/*
 * Create and announce the GEOM provider ("raid3/<name>") for a device that
 * has become operational; start disk synchronization if the device is
 * DEGRADED.  Called with sc_lock held; takes the topology lock internally.
 */
static void
g_raid3_launch_provider(struct g_raid3_softc *sc)
{
	struct g_provider *pp;
	struct g_raid3_disk *disk;
	int n;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	/*
	 * Advertise the largest stripe geometry found among the component
	 * providers, scaled by the number of data (non-parity) disks.
	 */
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_consumer && disk->d_consumer->provider &&
		    disk->d_consumer->provider->stripesize > pp->stripesize) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
		}
	}
	pp->stripesize *= sc->sc_ndisks - 1;
	pp->stripeoffset *= sc->sc_ndisks - 1;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);

	if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
		g_raid3_sync_start(sc);
}
/*
 * Tear down the device's GEOM provider: fail all queued bios with ENXIO,
 * wither and orphan the provider, and stop any in-progress synchronization.
 * Called without the topology lock; takes it internally.
 */
static void
g_raid3_destroy_provider(struct g_raid3_softc *sc)
{
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	/* Refuse new I/O on the provider from now on. */
	g_error_provider(sc->sc_provider, ENXIO);
	/* Drain the pending request queue, completing each bio with ENXIO. */
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	sc->sc_provider->flags |= G_PF_WITHER;
	g_orphan_provider(sc->sc_provider, ENXIO);
	g_topology_unlock();
	sc->sc_provider = NULL;
	/* Stop synchronization (type 1 = stopped, not finished). */
	if (sc->sc_syncdisk != NULL)
		g_raid3_sync_stop(sc, 1);
}
/*
 * Timeout callback: force the device to start even though not all
 * components have arrived yet.
 */
static void
g_raid3_go(void *arg)
{
	struct g_raid3_softc *sc;

	sc = arg;
	G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	/* Deliver a device-wide "start now" event to the worker thread. */
	g_raid3_event_send(sc, 0,
	    G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
}

/*
 * Decide the target state for a newly arrived disk by comparing its
 * on-disk syncid with the device syncid.  May destroy the disk (and
 * return G_RAID3_DISK_STATE_NONE) if the disk is fresher than the
 * running device.
 */
static u_int
g_raid3_determine_state(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_RAID3_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			    G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
			    (disk->d_flags &
			    G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_RAID3_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_RAID3_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because if it even was synchronized, it was
		 * synchronized to disks with different syncid.
		 */
		disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_RAID3_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_RAID3_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means that device was started on stale disks
		 * and more fresh disk just arrive.
		 * If there were writes, device is broken, sorry.
		 * I think the best choice here is don't touch
		 * this disk and inform the user loudly.
		 */
		G_RAID3_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrives!! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_raid3_get_diskname(disk));
		g_raid3_destroy_disk(disk);
		state = G_RAID3_DISK_STATE_NONE;
		/* Return immediately, because disk was destroyed. */
		return (state);
	}
	G_RAID3_DEBUG(3, "State for %s disk: %s.",
	    g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
	return (state);
}
/*
 * Update device state.
 *
 * State machine driven by disk arrival/departure events.  In STARTING it
 * elects the newest generation (genid) and synchronization (syncid) ids,
 * drops stale components, marks dirty components for synchronization and
 * transitions to COMPLETE or DEGRADED.  In DEGRADED/COMPLETE it bumps the
 * genid when requested, launches the provider when possible, and releases
 * the root-mount hold once the device is usable.
 *
 * 'force' is non-zero when the start timeout fired, i.e. we should start
 * with one disk missing instead of waiting for all components.
 * Called with sc_lock exclusively held.
 */
static void
g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
{
	struct g_raid3_disk *disk;
	u_int state;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	switch (sc->sc_state) {
	case G_RAID3_DEVICE_STATE_STARTING:
	    {
		u_int n, ndirty, ndisks, genid, syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * one disk is missing and 'force' is true.
		 */
		if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
			if (!force)
				callout_drain(&sc->sc_callout);
		} else {
			if (force) {
				/*
				 * Timeout expired, so destroy device.
				 */
				sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
				G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
			return;
		}

		/*
		 * Find the biggest genid.
		 */
		genid = 0;
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if (disk->d_genid > genid)
				genid = disk->d_genid;
		}
		sc->sc_genid = genid;
		/*
		 * Remove all disks without the biggest genid.
		 */
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if (disk->d_genid < genid) {
				G_RAID3_DEBUG(0,
				    "Component %s (device %s) broken, skipping.",
				    g_raid3_get_diskname(disk), sc->sc_name);
				g_raid3_destroy_disk(disk);
			}
		}

		/*
		 * There must be at least 'sc->sc_ndisks - 1' components
		 * with the same syncid and without SYNCHRONIZING flag.
		 */

		/*
		 * Find the biggest syncid, number of valid components and
		 * number of dirty components.
		 */
		ndirty = ndisks = syncid = 0;
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
				ndirty++;
			/* A larger syncid restarts the valid-disk count. */
			if (disk->d_sync.ds_syncid > syncid) {
				syncid = disk->d_sync.ds_syncid;
				ndisks = 0;
			} else if (disk->d_sync.ds_syncid < syncid) {
				continue;
			}
			if ((disk->d_flags &
			    G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
		}
		/*
		 * Do we have enough valid components?
		 */
		if (ndisks + 1 < sc->sc_ndisks) {
			G_RAID3_DEBUG(0,
			    "Device %s is broken, too few valid components.",
			    sc->sc_name);
			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
			return;
		}
		/*
		 * If there is one DIRTY component and all disks are present,
		 * mark it for synchronization. If there is more than one DIRTY
		 * component, mark parity component for synchronization.
		 */
		if (ndisks == sc->sc_ndisks && ndirty == 1) {
			for (n = 0; n < sc->sc_ndisks; n++) {
				disk = &sc->sc_disks[n];
				if ((disk->d_flags &
				    G_RAID3_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_flags |=
				    G_RAID3_DISK_FLAG_SYNCHRONIZING;
			}
		} else if (ndisks == sc->sc_ndisks && ndirty > 1) {
			/* Parity disk is the last component. */
			disk = &sc->sc_disks[sc->sc_ndisks - 1];
			disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
		}

		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
		}
		if (ndisks == sc->sc_ndisks)
			state = G_RAID3_DEVICE_STATE_COMPLETE;
		else /* if (ndisks == sc->sc_ndisks - 1) */
			state = G_RAID3_DEVICE_STATE_DEGRADED;
		G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
		    g_raid3_device_state2str(state));
		sc->sc_state = state;
		/* Dispatch a per-disk state event for every present disk. */
		for (n = 0; n < sc->sc_ndisks; n++) {
			disk = &sc->sc_disks[n];
			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
				continue;
			state = g_raid3_determine_state(disk);
			g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
			if (state == G_RAID3_DISK_STATE_STALE)
				sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
		}
		break;
	    }
	case G_RAID3_DEVICE_STATE_DEGRADED:
		/*
		 * Genid need to be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
			g_raid3_bump_genid(sc);
		}

		/* Wait until freshly arrived (NEW) disks are classified. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
			return;
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
		    sc->sc_ndisks - 1) {
			/* Too few active disks: the device is gone. */
			if (sc->sc_provider != NULL)
				g_raid3_destroy_provider(sc);
			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
			return;
		}
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
		    sc->sc_ndisks) {
			state = G_RAID3_DEVICE_STATE_COMPLETE;
			G_RAID3_DEBUG(1,
			    "Device %s state changed from %s to %s.",
			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
			    g_raid3_device_state2str(state));
			sc->sc_state = state;
		}
		if (sc->sc_provider == NULL)
			g_raid3_launch_provider(sc);
		/* Device is usable; release the boot-time root-mount hold. */
		if (sc->sc_rootmount != NULL) {
			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
		}
		break;
	case G_RAID3_DEVICE_STATE_COMPLETE:
		/*
		 * Genid need to be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
			g_raid3_bump_genid(sc);
		}

		/* Wait until freshly arrived (NEW) disks are classified. */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
			return;
		KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
		    sc->sc_ndisks - 1,
		    ("Too few ACTIVE components in COMPLETE state (device %s).",
		    sc->sc_name));
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
		    sc->sc_ndisks - 1) {
			/* Lost one disk: drop to DEGRADED. */
			state = G_RAID3_DEVICE_STATE_DEGRADED;
			G_RAID3_DEBUG(1,
			    "Device %s state changed from %s to %s.",
			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
			    g_raid3_device_state2str(state));
			sc->sc_state = state;
		}
		if (sc->sc_provider == NULL)
			g_raid3_launch_provider(sc);
		/* Device is usable; release the boot-time root-mount hold. */
		if (sc->sc_rootmount != NULL) {
			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
		    g_raid3_device_state2str(sc->sc_state)));
		break;
	}
}
26722d1661a5SPawel Jakub Dawidek default: 26732d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 26742d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state))); 26752d1661a5SPawel Jakub Dawidek break; 26762d1661a5SPawel Jakub Dawidek } 26772d1661a5SPawel Jakub Dawidek } 26782d1661a5SPawel Jakub Dawidek 26792d1661a5SPawel Jakub Dawidek /* 26802d1661a5SPawel Jakub Dawidek * Update disk state and device state if needed. 26812d1661a5SPawel Jakub Dawidek */ 26822d1661a5SPawel Jakub Dawidek #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 26832d1661a5SPawel Jakub Dawidek "Disk %s state changed from %s to %s (device %s).", \ 26842d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), \ 26852d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state), \ 26862d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state), sc->sc_name) 26872d1661a5SPawel Jakub Dawidek static int 2688d97d5ee9SPawel Jakub Dawidek g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 26892d1661a5SPawel Jakub Dawidek { 26902d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 26912d1661a5SPawel Jakub Dawidek 26922d1661a5SPawel Jakub Dawidek sc = disk->d_softc; 26933650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 26943650be51SPawel Jakub Dawidek 26952d1661a5SPawel Jakub Dawidek again: 26962d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 26972d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 26982d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(state)); 26992d1661a5SPawel Jakub Dawidek switch (state) { 27002d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_NEW: 27012d1661a5SPawel Jakub Dawidek /* 27022d1661a5SPawel Jakub Dawidek * Possible scenarios: 27032d1661a5SPawel Jakub Dawidek * 1. New disk arrive. 27042d1661a5SPawel Jakub Dawidek */ 27052d1661a5SPawel Jakub Dawidek /* Previous state should be NONE. 
*/ 27062d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 27072d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27082d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27092d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27102d1661a5SPawel Jakub Dawidek 27112d1661a5SPawel Jakub Dawidek disk->d_state = state; 27120cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 27132d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27142d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 27152d1661a5SPawel Jakub Dawidek break; 27162d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27172d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27182d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27192d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27202d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27212d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27222d1661a5SPawel Jakub Dawidek state = g_raid3_determine_state(disk); 27232d1661a5SPawel Jakub Dawidek if (state != G_RAID3_DISK_STATE_NONE) 27242d1661a5SPawel Jakub Dawidek goto again; 27252d1661a5SPawel Jakub Dawidek break; 27262d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_ACTIVE: 27272d1661a5SPawel Jakub Dawidek /* 27282d1661a5SPawel Jakub Dawidek * Possible scenarios: 27292d1661a5SPawel Jakub Dawidek * 1. New disk does not need synchronization. 27302d1661a5SPawel Jakub Dawidek * 2. Synchronization process finished successfully. 
27312d1661a5SPawel Jakub Dawidek */ 27322d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27332d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27342d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27352d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27362d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27372d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27382d1661a5SPawel Jakub Dawidek /* Previous state should be NEW or SYNCHRONIZING. */ 27392d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 27402d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 27412d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27422d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27432d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27442d1661a5SPawel Jakub Dawidek 2745bf31327cSPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 27462d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 27472d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 27482d1661a5SPawel Jakub Dawidek g_raid3_sync_stop(sc, 0); 27492d1661a5SPawel Jakub Dawidek } 27502d1661a5SPawel Jakub Dawidek disk->d_state = state; 27512d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset = 0; 27522d1661a5SPawel Jakub Dawidek disk->d_sync.ds_offset_done = 0; 27530962f942SPawel Jakub Dawidek g_raid3_update_idle(sc, disk); 2754bf31327cSPawel Jakub Dawidek g_raid3_update_metadata(disk); 27550cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 27562d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27572d1661a5SPawel Jakub Dawidek break; 27582d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_STALE: 27592d1661a5SPawel Jakub Dawidek /* 27602d1661a5SPawel Jakub 
Dawidek * Possible scenarios: 27612d1661a5SPawel Jakub Dawidek * 1. Stale disk was connected. 27622d1661a5SPawel Jakub Dawidek */ 27632d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 27642d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 27652d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27662d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27672d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 27682d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 27692d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27702d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27712d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27722d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27732d1661a5SPawel Jakub Dawidek /* 27742d1661a5SPawel Jakub Dawidek * STALE state is only possible if device is marked 27752d1661a5SPawel Jakub Dawidek * NOAUTOSYNC. 
27762d1661a5SPawel Jakub Dawidek */ 27772d1661a5SPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 27782d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 27792d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 27802d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 27812d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27822d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 27832d1661a5SPawel Jakub Dawidek 27842d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 27852d1661a5SPawel Jakub Dawidek disk->d_state = state; 27862d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 27872d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 27882d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 27892d1661a5SPawel Jakub Dawidek break; 27902d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_SYNCHRONIZING: 27912d1661a5SPawel Jakub Dawidek /* 27922d1661a5SPawel Jakub Dawidek * Possible scenarios: 27932d1661a5SPawel Jakub Dawidek * 1. Disk which needs synchronization was connected. 27942d1661a5SPawel Jakub Dawidek */ 27952d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. 
*/ 27962d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 27972d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 27982d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 27992d1661a5SPawel Jakub Dawidek KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28002d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 28012d1661a5SPawel Jakub Dawidek ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 28022d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28032d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28042d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28052d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28062d1661a5SPawel Jakub Dawidek 28072d1661a5SPawel Jakub Dawidek if (disk->d_state == G_RAID3_DISK_STATE_NEW) 28082d1661a5SPawel Jakub Dawidek disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 28092d1661a5SPawel Jakub Dawidek disk->d_state = state; 28102d1661a5SPawel Jakub Dawidek if (sc->sc_provider != NULL) { 28112d1661a5SPawel Jakub Dawidek g_raid3_sync_start(sc); 28122d1661a5SPawel Jakub Dawidek g_raid3_update_metadata(disk); 28132d1661a5SPawel Jakub Dawidek } 28142d1661a5SPawel Jakub Dawidek break; 28152d1661a5SPawel Jakub Dawidek case G_RAID3_DISK_STATE_DISCONNECTED: 28162d1661a5SPawel Jakub Dawidek /* 28172d1661a5SPawel Jakub Dawidek * Possible scenarios: 28182d1661a5SPawel Jakub Dawidek * 1. Device wasn't running yet, but disk disappear. 28192d1661a5SPawel Jakub Dawidek * 2. Disk was active and disapppear. 28202d1661a5SPawel Jakub Dawidek * 3. Disk disappear during synchronization process. 
28212d1661a5SPawel Jakub Dawidek */ 28222d1661a5SPawel Jakub Dawidek if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 28232d1661a5SPawel Jakub Dawidek sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 28242d1661a5SPawel Jakub Dawidek /* 28252d1661a5SPawel Jakub Dawidek * Previous state should be ACTIVE, STALE or 28262d1661a5SPawel Jakub Dawidek * SYNCHRONIZING. 28272d1661a5SPawel Jakub Dawidek */ 28282d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 28292d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_STALE || 28302d1661a5SPawel Jakub Dawidek disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 28312d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28322d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28332d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28342d1661a5SPawel Jakub Dawidek } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 28352d1661a5SPawel Jakub Dawidek /* Previous state should be NEW. */ 28362d1661a5SPawel Jakub Dawidek KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 28372d1661a5SPawel Jakub Dawidek ("Wrong disk state (%s, %s).", 28382d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28392d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28402d1661a5SPawel Jakub Dawidek /* 28412d1661a5SPawel Jakub Dawidek * Reset bumping syncid if disk disappeared in STARTING 28422d1661a5SPawel Jakub Dawidek * state. 
28432d1661a5SPawel Jakub Dawidek */ 2844ea973705SPawel Jakub Dawidek if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2845a245a548SPawel Jakub Dawidek sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 28462d1661a5SPawel Jakub Dawidek #ifdef INVARIANTS 28472d1661a5SPawel Jakub Dawidek } else { 28482d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 28492d1661a5SPawel Jakub Dawidek sc->sc_name, 28502d1661a5SPawel Jakub Dawidek g_raid3_device_state2str(sc->sc_state), 28512d1661a5SPawel Jakub Dawidek g_raid3_get_diskname(disk), 28522d1661a5SPawel Jakub Dawidek g_raid3_disk_state2str(disk->d_state))); 28532d1661a5SPawel Jakub Dawidek #endif 28542d1661a5SPawel Jakub Dawidek } 28552d1661a5SPawel Jakub Dawidek DISK_STATE_CHANGED(); 28562d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 28572d1661a5SPawel Jakub Dawidek sc->sc_name, g_raid3_get_diskname(disk)); 28582d1661a5SPawel Jakub Dawidek 28592d1661a5SPawel Jakub Dawidek g_raid3_destroy_disk(disk); 28602d1661a5SPawel Jakub Dawidek break; 28612d1661a5SPawel Jakub Dawidek default: 28622d1661a5SPawel Jakub Dawidek KASSERT(1 == 0, ("Unknown state (%u).", state)); 28632d1661a5SPawel Jakub Dawidek break; 28642d1661a5SPawel Jakub Dawidek } 28652d1661a5SPawel Jakub Dawidek return (0); 28662d1661a5SPawel Jakub Dawidek } 28672d1661a5SPawel Jakub Dawidek #undef DISK_STATE_CHANGED 28682d1661a5SPawel Jakub Dawidek 2869ea973705SPawel Jakub Dawidek int 28702d1661a5SPawel Jakub Dawidek g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 28712d1661a5SPawel Jakub Dawidek { 28722d1661a5SPawel Jakub Dawidek struct g_provider *pp; 28732d1661a5SPawel Jakub Dawidek u_char *buf; 28742d1661a5SPawel Jakub Dawidek int error; 28752d1661a5SPawel Jakub Dawidek 28762d1661a5SPawel Jakub Dawidek g_topology_assert(); 28772d1661a5SPawel Jakub Dawidek 28782d1661a5SPawel Jakub Dawidek error = g_access(cp, 1, 0, 0); 28792d1661a5SPawel Jakub Dawidek if (error != 0) 
28802d1661a5SPawel Jakub Dawidek return (error); 28812d1661a5SPawel Jakub Dawidek pp = cp->provider; 28822d1661a5SPawel Jakub Dawidek g_topology_unlock(); 28832d1661a5SPawel Jakub Dawidek /* Metadata are stored on last sector. */ 28842d1661a5SPawel Jakub Dawidek buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 28852d1661a5SPawel Jakub Dawidek &error); 28862d1661a5SPawel Jakub Dawidek g_topology_lock(); 28872d1661a5SPawel Jakub Dawidek g_access(cp, -1, 0, 0); 28888a4a44b5SMaxim Sobolev if (buf == NULL) { 2889a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2890a245a548SPawel Jakub Dawidek cp->provider->name, error); 28912d1661a5SPawel Jakub Dawidek return (error); 28922d1661a5SPawel Jakub Dawidek } 28932d1661a5SPawel Jakub Dawidek 28942d1661a5SPawel Jakub Dawidek /* Decode metadata. */ 28952d1661a5SPawel Jakub Dawidek error = raid3_metadata_decode(buf, md); 28962d1661a5SPawel Jakub Dawidek g_free(buf); 28972d1661a5SPawel Jakub Dawidek if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 28982d1661a5SPawel Jakub Dawidek return (EINVAL); 2899a245a548SPawel Jakub Dawidek if (md->md_version > G_RAID3_VERSION) { 2900a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, 2901a245a548SPawel Jakub Dawidek "Kernel module is too old to handle metadata from %s.", 2902a245a548SPawel Jakub Dawidek cp->provider->name); 2903a245a548SPawel Jakub Dawidek return (EINVAL); 2904a245a548SPawel Jakub Dawidek } 29052d1661a5SPawel Jakub Dawidek if (error != 0) { 29062d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 29072d1661a5SPawel Jakub Dawidek cp->provider->name); 29082d1661a5SPawel Jakub Dawidek return (error); 29092d1661a5SPawel Jakub Dawidek } 291095959703SAndrey V. Elsukov if (md->md_sectorsize > MAXPHYS) { 291195959703SAndrey V. Elsukov G_RAID3_DEBUG(0, "The blocksize is too big."); 291295959703SAndrey V. Elsukov return (EINVAL); 291395959703SAndrey V. 
Elsukov } 29142d1661a5SPawel Jakub Dawidek 29152d1661a5SPawel Jakub Dawidek return (0); 29162d1661a5SPawel Jakub Dawidek } 29172d1661a5SPawel Jakub Dawidek 29182d1661a5SPawel Jakub Dawidek static int 29192d1661a5SPawel Jakub Dawidek g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp, 29202d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 29212d1661a5SPawel Jakub Dawidek { 29222d1661a5SPawel Jakub Dawidek 29232d1661a5SPawel Jakub Dawidek if (md->md_no >= sc->sc_ndisks) { 29242d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.", 29252d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29262d1661a5SPawel Jakub Dawidek return (EINVAL); 29272d1661a5SPawel Jakub Dawidek } 29282d1661a5SPawel Jakub Dawidek if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) { 29292d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.", 29302d1661a5SPawel Jakub Dawidek pp->name, md->md_no); 29312d1661a5SPawel Jakub Dawidek return (EEXIST); 29322d1661a5SPawel Jakub Dawidek } 29332d1661a5SPawel Jakub Dawidek if (md->md_all != sc->sc_ndisks) { 29342d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29352d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29362d1661a5SPawel Jakub Dawidek "md_all", pp->name, sc->sc_name); 29372d1661a5SPawel Jakub Dawidek return (EINVAL); 29382d1661a5SPawel Jakub Dawidek } 293911b2174fSPawel Jakub Dawidek if ((md->md_mediasize % md->md_sectorsize) != 0) { 294011b2174fSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != " 294111b2174fSPawel Jakub Dawidek "0) on disk %s (device %s), skipping.", pp->name, 294211b2174fSPawel Jakub Dawidek sc->sc_name); 294311b2174fSPawel Jakub Dawidek return (EINVAL); 294411b2174fSPawel Jakub Dawidek } 29452d1661a5SPawel Jakub Dawidek if (md->md_mediasize != sc->sc_mediasize) { 29462d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29472d1661a5SPawel Jakub Dawidek "Invalid 
'%s' field on disk %s (device %s), skipping.", 29482d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29492d1661a5SPawel Jakub Dawidek return (EINVAL); 29502d1661a5SPawel Jakub Dawidek } 29512d1661a5SPawel Jakub Dawidek if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) { 29522d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29532d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29542d1661a5SPawel Jakub Dawidek "md_mediasize", pp->name, sc->sc_name); 29552d1661a5SPawel Jakub Dawidek return (EINVAL); 29562d1661a5SPawel Jakub Dawidek } 29572d1661a5SPawel Jakub Dawidek if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) { 29582d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29592d1661a5SPawel Jakub Dawidek "Invalid size of disk %s (device %s), skipping.", pp->name, 29602d1661a5SPawel Jakub Dawidek sc->sc_name); 29612d1661a5SPawel Jakub Dawidek return (EINVAL); 29622d1661a5SPawel Jakub Dawidek } 29632d1661a5SPawel Jakub Dawidek if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) { 29642d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29652d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29662d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29672d1661a5SPawel Jakub Dawidek return (EINVAL); 29682d1661a5SPawel Jakub Dawidek } 29692d1661a5SPawel Jakub Dawidek if (md->md_sectorsize != sc->sc_sectorsize) { 29702d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29712d1661a5SPawel Jakub Dawidek "Invalid '%s' field on disk %s (device %s), skipping.", 29722d1661a5SPawel Jakub Dawidek "md_sectorsize", pp->name, sc->sc_name); 29732d1661a5SPawel Jakub Dawidek return (EINVAL); 29742d1661a5SPawel Jakub Dawidek } 29752d1661a5SPawel Jakub Dawidek if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 29762d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29772d1661a5SPawel Jakub Dawidek "Invalid sector size of disk %s (device %s), skipping.", 29782d1661a5SPawel Jakub Dawidek pp->name, 
sc->sc_name); 29792d1661a5SPawel Jakub Dawidek return (EINVAL); 29802d1661a5SPawel Jakub Dawidek } 29812d1661a5SPawel Jakub Dawidek if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 29822d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29832d1661a5SPawel Jakub Dawidek "Invalid device flags on disk %s (device %s), skipping.", 29842d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 29852d1661a5SPawel Jakub Dawidek return (EINVAL); 29862d1661a5SPawel Jakub Dawidek } 2987dba915cfSPawel Jakub Dawidek if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 2988dba915cfSPawel Jakub Dawidek (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 2989dba915cfSPawel Jakub Dawidek /* 2990dba915cfSPawel Jakub Dawidek * VERIFY and ROUND-ROBIN options are mutally exclusive. 2991dba915cfSPawel Jakub Dawidek */ 2992dba915cfSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 2993dba915cfSPawel Jakub Dawidek "disk %s (device %s), skipping.", pp->name, sc->sc_name); 2994dba915cfSPawel Jakub Dawidek return (EINVAL); 2995dba915cfSPawel Jakub Dawidek } 29962d1661a5SPawel Jakub Dawidek if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 29972d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, 29982d1661a5SPawel Jakub Dawidek "Invalid disk flags on disk %s (device %s), skipping.", 29992d1661a5SPawel Jakub Dawidek pp->name, sc->sc_name); 30002d1661a5SPawel Jakub Dawidek return (EINVAL); 30012d1661a5SPawel Jakub Dawidek } 30022d1661a5SPawel Jakub Dawidek return (0); 30032d1661a5SPawel Jakub Dawidek } 30042d1661a5SPawel Jakub Dawidek 3005ea973705SPawel Jakub Dawidek int 30062d1661a5SPawel Jakub Dawidek g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 30072d1661a5SPawel Jakub Dawidek struct g_raid3_metadata *md) 30082d1661a5SPawel Jakub Dawidek { 30092d1661a5SPawel Jakub Dawidek struct g_raid3_disk *disk; 30102d1661a5SPawel Jakub Dawidek int error; 30112d1661a5SPawel Jakub Dawidek 30123650be51SPawel Jakub Dawidek g_topology_assert_not(); 
30132d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 30142d1661a5SPawel Jakub Dawidek 30152d1661a5SPawel Jakub Dawidek error = g_raid3_check_metadata(sc, pp, md); 30162d1661a5SPawel Jakub Dawidek if (error != 0) 30172d1661a5SPawel Jakub Dawidek return (error); 3018a245a548SPawel Jakub Dawidek if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3019a245a548SPawel Jakub Dawidek md->md_genid < sc->sc_genid) { 3020a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3021a245a548SPawel Jakub Dawidek pp->name, sc->sc_name); 3022a245a548SPawel Jakub Dawidek return (EINVAL); 3023a245a548SPawel Jakub Dawidek } 30242d1661a5SPawel Jakub Dawidek disk = g_raid3_init_disk(sc, pp, md, &error); 30252d1661a5SPawel Jakub Dawidek if (disk == NULL) 30262d1661a5SPawel Jakub Dawidek return (error); 30272d1661a5SPawel Jakub Dawidek error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 30282d1661a5SPawel Jakub Dawidek G_RAID3_EVENT_WAIT); 3029a245a548SPawel Jakub Dawidek if (error != 0) 30302d1661a5SPawel Jakub Dawidek return (error); 3031a245a548SPawel Jakub Dawidek if (md->md_version < G_RAID3_VERSION) { 3032a245a548SPawel Jakub Dawidek G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3033a245a548SPawel Jakub Dawidek pp->name, md->md_version, G_RAID3_VERSION); 3034a245a548SPawel Jakub Dawidek g_raid3_update_metadata(disk); 3035a245a548SPawel Jakub Dawidek } 3036a245a548SPawel Jakub Dawidek return (0); 30372d1661a5SPawel Jakub Dawidek } 30382d1661a5SPawel Jakub Dawidek 3039712fe9bdSPawel Jakub Dawidek static void 3040712fe9bdSPawel Jakub Dawidek g_raid3_destroy_delayed(void *arg, int flag) 3041712fe9bdSPawel Jakub Dawidek { 3042712fe9bdSPawel Jakub Dawidek struct g_raid3_softc *sc; 3043712fe9bdSPawel Jakub Dawidek int error; 3044712fe9bdSPawel Jakub Dawidek 3045712fe9bdSPawel Jakub Dawidek if (flag == EV_CANCEL) { 3046712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Destroying canceled."); 3047712fe9bdSPawel 
Jakub Dawidek return; 3048712fe9bdSPawel Jakub Dawidek } 3049712fe9bdSPawel Jakub Dawidek sc = arg; 3050712fe9bdSPawel Jakub Dawidek g_topology_unlock(); 3051712fe9bdSPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3052712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3053712fe9bdSPawel Jakub Dawidek ("DESTROY flag set on %s.", sc->sc_name)); 3054712fe9bdSPawel Jakub Dawidek KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3055712fe9bdSPawel Jakub Dawidek ("DESTROYING flag not set on %s.", sc->sc_name)); 3056712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3057712fe9bdSPawel Jakub Dawidek error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3058712fe9bdSPawel Jakub Dawidek if (error != 0) { 3059712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3060712fe9bdSPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 3061712fe9bdSPawel Jakub Dawidek } 3062712fe9bdSPawel Jakub Dawidek g_topology_lock(); 3063712fe9bdSPawel Jakub Dawidek } 3064712fe9bdSPawel Jakub Dawidek 30652d1661a5SPawel Jakub Dawidek static int 30662d1661a5SPawel Jakub Dawidek g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 30672d1661a5SPawel Jakub Dawidek { 30682d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 3069712fe9bdSPawel Jakub Dawidek int dcr, dcw, dce, error = 0; 30702d1661a5SPawel Jakub Dawidek 30712d1661a5SPawel Jakub Dawidek g_topology_assert(); 30722d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 30732d1661a5SPawel Jakub Dawidek acw, ace); 30742d1661a5SPawel Jakub Dawidek 30751f7fec3cSPawel Jakub Dawidek sc = pp->geom->softc; 30761f7fec3cSPawel Jakub Dawidek if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 30771f7fec3cSPawel Jakub Dawidek return (0); 30781f7fec3cSPawel Jakub Dawidek KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 30791f7fec3cSPawel Jakub Dawidek 30802d1661a5SPawel Jakub Dawidek dcr 
= pp->acr + acr; 30812d1661a5SPawel Jakub Dawidek dcw = pp->acw + acw; 30822d1661a5SPawel Jakub Dawidek dce = pp->ace + ace; 30832d1661a5SPawel Jakub Dawidek 30843650be51SPawel Jakub Dawidek g_topology_unlock(); 30853650be51SPawel Jakub Dawidek sx_xlock(&sc->sc_lock); 3086712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 30873650be51SPawel Jakub Dawidek g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 30883650be51SPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) 30893650be51SPawel Jakub Dawidek error = ENXIO; 30903650be51SPawel Jakub Dawidek goto end; 30912d1661a5SPawel Jakub Dawidek } 3092f62c1a47SAlexander Motin if (dcw == 0) 30933650be51SPawel Jakub Dawidek g_raid3_idle(sc, dcw); 3094712fe9bdSPawel Jakub Dawidek if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3095712fe9bdSPawel Jakub Dawidek if (acr > 0 || acw > 0 || ace > 0) { 3096712fe9bdSPawel Jakub Dawidek error = ENXIO; 3097712fe9bdSPawel Jakub Dawidek goto end; 3098712fe9bdSPawel Jakub Dawidek } 3099712fe9bdSPawel Jakub Dawidek if (dcr == 0 && dcw == 0 && dce == 0) { 3100712fe9bdSPawel Jakub Dawidek g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3101712fe9bdSPawel Jakub Dawidek sc, NULL); 3102712fe9bdSPawel Jakub Dawidek } 3103712fe9bdSPawel Jakub Dawidek } 31043650be51SPawel Jakub Dawidek end: 31053650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 31063650be51SPawel Jakub Dawidek g_topology_lock(); 31073650be51SPawel Jakub Dawidek return (error); 31082d1661a5SPawel Jakub Dawidek } 31092d1661a5SPawel Jakub Dawidek 31102d1661a5SPawel Jakub Dawidek static struct g_geom * 31112d1661a5SPawel Jakub Dawidek g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md) 31122d1661a5SPawel Jakub Dawidek { 31132d1661a5SPawel Jakub Dawidek struct g_raid3_softc *sc; 31142d1661a5SPawel Jakub Dawidek struct g_geom *gp; 31152d1661a5SPawel Jakub Dawidek int error, timeout; 31162d1661a5SPawel Jakub Dawidek u_int n; 31172d1661a5SPawel 
Jakub Dawidek 31182d1661a5SPawel Jakub Dawidek g_topology_assert(); 31192d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 31202d1661a5SPawel Jakub Dawidek 31212d1661a5SPawel Jakub Dawidek /* One disk is minimum. */ 31222d1661a5SPawel Jakub Dawidek if (md->md_all < 1) 31232d1661a5SPawel Jakub Dawidek return (NULL); 31242d1661a5SPawel Jakub Dawidek /* 31252d1661a5SPawel Jakub Dawidek * Action geom. 31262d1661a5SPawel Jakub Dawidek */ 31272d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s", md->md_name); 31282d1661a5SPawel Jakub Dawidek sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO); 31292d1661a5SPawel Jakub Dawidek sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3, 31302d1661a5SPawel Jakub Dawidek M_WAITOK | M_ZERO); 31312d1661a5SPawel Jakub Dawidek gp->start = g_raid3_start; 31322d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31332d1661a5SPawel Jakub Dawidek gp->access = g_raid3_access; 31342d1661a5SPawel Jakub Dawidek gp->dumpconf = g_raid3_dumpconf; 31352d1661a5SPawel Jakub Dawidek 31362d1661a5SPawel Jakub Dawidek sc->sc_id = md->md_id; 31372d1661a5SPawel Jakub Dawidek sc->sc_mediasize = md->md_mediasize; 31382d1661a5SPawel Jakub Dawidek sc->sc_sectorsize = md->md_sectorsize; 31392d1661a5SPawel Jakub Dawidek sc->sc_ndisks = md->md_all; 3140f5a2f7feSPawel Jakub Dawidek sc->sc_round_robin = 0; 31412d1661a5SPawel Jakub Dawidek sc->sc_flags = md->md_mflags; 3142a245a548SPawel Jakub Dawidek sc->sc_bump_id = 0; 31430962f942SPawel Jakub Dawidek sc->sc_idle = 1; 314401f1f41cSPawel Jakub Dawidek sc->sc_last_write = time_uptime; 31450962f942SPawel Jakub Dawidek sc->sc_writes = 0; 3146afd05d74SPawel Jakub Dawidek for (n = 0; n < sc->sc_ndisks; n++) { 3147afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_softc = sc; 3148afd05d74SPawel Jakub Dawidek sc->sc_disks[n].d_no = n; 31492d1661a5SPawel Jakub Dawidek sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK; 3150afd05d74SPawel Jakub 
Dawidek } 31513650be51SPawel Jakub Dawidek sx_init(&sc->sc_lock, "graid3:lock"); 31522d1661a5SPawel Jakub Dawidek bioq_init(&sc->sc_queue); 31532d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF); 31543650be51SPawel Jakub Dawidek bioq_init(&sc->sc_regular_delayed); 31553650be51SPawel Jakub Dawidek bioq_init(&sc->sc_inflight); 31563650be51SPawel Jakub Dawidek bioq_init(&sc->sc_sync_delayed); 31572d1661a5SPawel Jakub Dawidek TAILQ_INIT(&sc->sc_events); 31582d1661a5SPawel Jakub Dawidek mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF); 3159fd90e2edSJung-uk Kim callout_init(&sc->sc_callout, 1); 31602d1661a5SPawel Jakub Dawidek sc->sc_state = G_RAID3_DEVICE_STATE_STARTING; 31612d1661a5SPawel Jakub Dawidek gp->softc = sc; 31622d1661a5SPawel Jakub Dawidek sc->sc_geom = gp; 31632d1661a5SPawel Jakub Dawidek sc->sc_provider = NULL; 31642d1661a5SPawel Jakub Dawidek /* 31652d1661a5SPawel Jakub Dawidek * Synchronization geom. 31662d1661a5SPawel Jakub Dawidek */ 31672d1661a5SPawel Jakub Dawidek gp = g_new_geomf(mp, "%s.sync", md->md_name); 31682d1661a5SPawel Jakub Dawidek gp->softc = sc; 31692d1661a5SPawel Jakub Dawidek gp->orphan = g_raid3_orphan; 31702d1661a5SPawel Jakub Dawidek sc->sc_sync.ds_geom = gp; 31713650be51SPawel Jakub Dawidek 3172ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 3173ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k", 3174ed940a82SPawel Jakub Dawidek 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3175ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31763650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0; 31773650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k; 31783650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_requested = 31793650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0; 3180ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = 
uma_zcreate("gr3:16k", 3181ed940a82SPawel Jakub Dawidek 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3182ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31833650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0; 31843650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k; 31853650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_requested = 31863650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0; 3187ed940a82SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k", 3188ed940a82SPawel Jakub Dawidek 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3189ed940a82SPawel Jakub Dawidek UMA_ALIGN_PTR, 0); 31903650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0; 31913650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k; 31923650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_requested = 31933650be51SPawel Jakub Dawidek sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0; 3194ed940a82SPawel Jakub Dawidek } 31953650be51SPawel Jakub Dawidek 31963745c395SJulian Elischer error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0, 31972d1661a5SPawel Jakub Dawidek "g_raid3 %s", md->md_name); 31982d1661a5SPawel Jakub Dawidek if (error != 0) { 31992d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.", 32002d1661a5SPawel Jakub Dawidek sc->sc_name); 3201ed940a82SPawel Jakub Dawidek if (!g_raid3_use_malloc) { 32023650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 32033650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 32043650be51SPawel Jakub Dawidek uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 3205ed940a82SPawel Jakub Dawidek } 32062d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_sync.ds_geom); 32072d1661a5SPawel Jakub Dawidek mtx_destroy(&sc->sc_events_mtx); 32082d1661a5SPawel Jakub Dawidek 
mtx_destroy(&sc->sc_queue_mtx); 32093650be51SPawel Jakub Dawidek sx_destroy(&sc->sc_lock); 32102d1661a5SPawel Jakub Dawidek g_destroy_geom(sc->sc_geom); 32112d1661a5SPawel Jakub Dawidek free(sc->sc_disks, M_RAID3); 32122d1661a5SPawel Jakub Dawidek free(sc, M_RAID3); 32132d1661a5SPawel Jakub Dawidek return (NULL); 32142d1661a5SPawel Jakub Dawidek } 32152d1661a5SPawel Jakub Dawidek 32160cca572eSJohn-Mark Gurney G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).", 32170cca572eSJohn-Mark Gurney sc->sc_name, sc->sc_ndisks, sc->sc_id); 32182d1661a5SPawel Jakub Dawidek 3219853a10a5SAndrew Thompson sc->sc_rootmount = root_mount_hold("GRAID3"); 32204ed854e8SPawel Jakub Dawidek G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 32214ed854e8SPawel Jakub Dawidek 32222d1661a5SPawel Jakub Dawidek /* 32232d1661a5SPawel Jakub Dawidek * Run timeout. 32242d1661a5SPawel Jakub Dawidek */ 32252d1661a5SPawel Jakub Dawidek timeout = atomic_load_acq_int(&g_raid3_timeout); 32262d1661a5SPawel Jakub Dawidek callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc); 32272d1661a5SPawel Jakub Dawidek return (sc->sc_geom); 32282d1661a5SPawel Jakub Dawidek } 32292d1661a5SPawel Jakub Dawidek 32302d1661a5SPawel Jakub Dawidek int 3231712fe9bdSPawel Jakub Dawidek g_raid3_destroy(struct g_raid3_softc *sc, int how) 32322d1661a5SPawel Jakub Dawidek { 32332d1661a5SPawel Jakub Dawidek struct g_provider *pp; 32342d1661a5SPawel Jakub Dawidek 32353650be51SPawel Jakub Dawidek g_topology_assert_not(); 32362d1661a5SPawel Jakub Dawidek if (sc == NULL) 32372d1661a5SPawel Jakub Dawidek return (ENXIO); 32383650be51SPawel Jakub Dawidek sx_assert(&sc->sc_lock, SX_XLOCKED); 32393650be51SPawel Jakub Dawidek 32402d1661a5SPawel Jakub Dawidek pp = sc->sc_provider; 32412d1661a5SPawel Jakub Dawidek if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 3242712fe9bdSPawel Jakub Dawidek switch (how) { 3243712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_SOFT: 32442d1661a5SPawel Jakub Dawidek 
G_RAID3_DEBUG(1, 32452d1661a5SPawel Jakub Dawidek "Device %s is still open (r%dw%de%d).", pp->name, 32462d1661a5SPawel Jakub Dawidek pp->acr, pp->acw, pp->ace); 32472d1661a5SPawel Jakub Dawidek return (EBUSY); 3248712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_DELAYED: 3249712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, 3250712fe9bdSPawel Jakub Dawidek "Device %s will be destroyed on last close.", 3251712fe9bdSPawel Jakub Dawidek pp->name); 3252712fe9bdSPawel Jakub Dawidek if (sc->sc_syncdisk != NULL) 3253712fe9bdSPawel Jakub Dawidek g_raid3_sync_stop(sc, 1); 3254712fe9bdSPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING; 3255712fe9bdSPawel Jakub Dawidek return (EBUSY); 3256712fe9bdSPawel Jakub Dawidek case G_RAID3_DESTROY_HARD: 3257712fe9bdSPawel Jakub Dawidek G_RAID3_DEBUG(1, "Device %s is still open, so it " 3258712fe9bdSPawel Jakub Dawidek "can't be definitely removed.", pp->name); 3259712fe9bdSPawel Jakub Dawidek break; 32602d1661a5SPawel Jakub Dawidek } 32612d1661a5SPawel Jakub Dawidek } 32622d1661a5SPawel Jakub Dawidek 326318486a5eSPawel Jakub Dawidek g_topology_lock(); 326418486a5eSPawel Jakub Dawidek if (sc->sc_geom->softc == NULL) { 326518486a5eSPawel Jakub Dawidek g_topology_unlock(); 326618486a5eSPawel Jakub Dawidek return (0); 326718486a5eSPawel Jakub Dawidek } 326818486a5eSPawel Jakub Dawidek sc->sc_geom->softc = NULL; 326918486a5eSPawel Jakub Dawidek sc->sc_sync.ds_geom->softc = NULL; 327018486a5eSPawel Jakub Dawidek g_topology_unlock(); 327118486a5eSPawel Jakub Dawidek 32722d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 32732d1661a5SPawel Jakub Dawidek sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT; 32742d1661a5SPawel Jakub Dawidek G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 32753650be51SPawel Jakub Dawidek sx_xunlock(&sc->sc_lock); 32762d1661a5SPawel Jakub Dawidek mtx_lock(&sc->sc_queue_mtx); 32772d1661a5SPawel Jakub Dawidek wakeup(sc); 32782d1661a5SPawel Jakub Dawidek wakeup(&sc->sc_queue); 
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	/*
	 * Poll (at 5Hz) until the worker thread is gone; sc_worker is
	 * presumably cleared by the worker itself on exit -- confirm
	 * against the worker code earlier in this file.
	 */
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	/* Reacquire the softc lock for the final teardown and free. */
	sx_xlock(&sc->sc_lock);
	g_raid3_destroy_device(sc);
	free(sc->sc_disks, M_RAID3);
	free(sc, M_RAID3);
	return (0);
}

/*
 * Orphan method for the short-lived "raid3:taste" geom created in
 * g_raid3_taste().  The taste consumer is attached and detached without
 * dropping the topology lock, so this must never be called.
 */
static void
g_raid3_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

/*
 * Taste method: read raid3 on-disk metadata from provider pp and, if it
 * is valid, add pp as a disk to the matching existing device or create a
 * new device for it.  Called with the topology lock held.  Returns the
 * device geom on success, NULL otherwise.
 */
static struct g_geom *
g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_raid3_metadata md;
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_RAID3_DEBUG(2, "Tasting %s.", pp->name);

	/*
	 * Create a throw-away geom/consumer pair just to read the
	 * metadata; it is destroyed again before any decision is made.
	 */
	gp = g_new_geomf(mp, "raid3:taste");
	/* This orphan function should be never called. */
	gp->orphan = g_raid3_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_raid3_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/*
	 * Sanity checks: if the metadata hardcodes a provider name or a
	 * provider size, they must match the provider we are tasting.
	 */
	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_raid3_debug >= 2)
		raid3_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip the per-device synchronization geom. */
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		/* Same name but different ID is a configuration clash. */
		if (md.md_id != sc->sc_id) {
			G_RAID3_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_raid3_create(mp, &md);
		if (gp == NULL) {
			G_RAID3_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	/* g_raid3_add_disk() runs under the softc sx lock, not topology. */
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	error = g_raid3_add_disk(sc, pp, &md);
	if (error != 0) {
		G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		/*
		 * If the device ended up with no disks at all, destroy it.
		 * No sx_xunlock() here: g_raid3_destroy() is expected to
		 * consume sc_lock on success (cf. g_raid3_destroy_geom(),
		 * which unlocks only when destroy fails).
		 */
		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
		    sc->sc_ndisks) {
			g_cancel_event(sc);
			g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}

/*
 * Class destroy_geom method (soft destroy, e.g. module unload).  Called
 * with the topology lock held; it is dropped while the softc lock is
 * taken.  On success g_raid3_destroy() drops sc_lock itself, so we only
 * unlock on error.
 */
static int
g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
    struct g_geom *gp)
{
	struct g_raid3_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

/*
 * dumpconf method: emit the XML fragment describing this geom for
 * "geom raid3 list" / sysctl kern.geom.confxml.  With cp != NULL we
 * describe a single disk, otherwise the device as a whole; per-provider
 * output (pp != NULL, cp == NULL) is intentionally empty.  The topology
 * lock is swapped for the softc lock around reads of softc state.
 */
static void
g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_raid3_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_raid3_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<Type>", indent);
		/* The highest-numbered disk holds parity in raid3. */
		if (disk->d_no == sc->sc_ndisks - 1)
			sbuf_printf(sb, "PARITY");
		else
			sbuf_printf(sb, "DATA");
		sbuf_printf(sb, "</Type>\n");
		sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
		    (u_int)disk->d_no);
		if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset == 0)
				sbuf_printf(sb, "0%%");
			else {
				/*
				 * Percent done; sc_mediasize apparently
				 * spans the data disks only (ndisks - 1),
				 * giving the per-disk size -- confirm.
				 */
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset * 100) /
				    (sc->sc_mediasize / (sc->sc_ndisks - 1))));
			}
			sbuf_printf(sb, "</Synchronized>\n");
			if (disk->d_sync.ds_offset > 0) {
				sbuf_printf(sb, "%s<BytesSynced>%jd"
				    "</BytesSynced>\n", indent,
				    (intmax_t)disk->d_sync.ds_offset);
			}
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

			/* Append flag names as a comma-separated list. */
#define	ADD_FLAG(flag, name)	do {					\
			if ((disk->d_flags & (flag)) != 0) {		\
				if (!first)				\
					sbuf_printf(sb, ", ");		\
				else					\
					first = 0;			\
				sbuf_printf(sb, name);			\
			}						\
		} while (0)
			ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
			ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_raid3_disk_state2str(disk->d_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		/* UMA zone statistics only exist in non-malloc mode. */
		if (!g_raid3_use_malloc) {
			sbuf_printf(sb,
			    "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
			sbuf_printf(sb,
			    "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
			sbuf_printf(sb,
			    "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
			sbuf_printf(sb,
			    "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
			sbuf_printf(sb,
			    "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
			sbuf_printf(sb,
			    "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
			    sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
		}
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

			/* Same comma-separated list, for device flags. */
#define	ADD_FLAG(flag, name)	do {					\
			if ((sc->sc_flags & (flag)) != 0) {		\
				if (!first)				\
					sbuf_printf(sb, ", ");		\
				else					\
					first = 0;			\
				sbuf_printf(sb, name);			\
			}						\
		} while (0)
			ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
			ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
			ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
			    "ROUND-ROBIN");
			ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_raid3_device_state2str(sc->sc_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}

/*
 * shutdown_post_sync event handler: once filesystems are synced, mark
 * the class as shutting down, idle every device and destroy it with
 * G_RAID3_DESTROY_DELAYED.  Same lock convention as above: destroy
 * consumes sc_lock on success, so we unlock only on error.
 */
static void
g_raid3_shutdown_post_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_raid3_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	g_raid3_shutdown = 1;
	/* SAFE variant: destroying a geom unlinks it from the list. */
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		/* Skip synchronization geom. */
		if (gp == sc->sc_sync.ds_geom)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		g_raid3_idle(sc, -1);
		g_cancel_event(sc);
		error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

/*
 * Class init method: register the shutdown_post_sync handler so raid3
 * devices are cleanly closed at system shutdown.
 */
static void
g_raid3_init(struct g_class *mp)
{

	g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_raid3_post_sync == NULL)
		G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
}

/*
 * Class fini method: undo g_raid3_init()'s event registration.
 */
static void
g_raid3_fini(struct g_class *mp)
{

	if (g_raid3_post_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
}

DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);