189b17223SAlexander Motin /*- 289b17223SAlexander Motin * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org> 389b17223SAlexander Motin * All rights reserved. 489b17223SAlexander Motin * 589b17223SAlexander Motin * Redistribution and use in source and binary forms, with or without 689b17223SAlexander Motin * modification, are permitted provided that the following conditions 789b17223SAlexander Motin * are met: 889b17223SAlexander Motin * 1. Redistributions of source code must retain the above copyright 989b17223SAlexander Motin * notice, this list of conditions and the following disclaimer. 1089b17223SAlexander Motin * 2. Redistributions in binary form must reproduce the above copyright 1189b17223SAlexander Motin * notice, this list of conditions and the following disclaimer in the 1289b17223SAlexander Motin * documentation and/or other materials provided with the distribution. 1389b17223SAlexander Motin * 1489b17223SAlexander Motin * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 1589b17223SAlexander Motin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1689b17223SAlexander Motin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1789b17223SAlexander Motin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 1889b17223SAlexander Motin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1989b17223SAlexander Motin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2089b17223SAlexander Motin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2189b17223SAlexander Motin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2289b17223SAlexander Motin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2389b17223SAlexander Motin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2489b17223SAlexander Motin * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid/g_raid.h>
#include "g_raid_md_if.h"
#include "g_raid_tr_if.h"

/* Malloc type for all GEOM_RAID allocations in this class. */
static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");

/* Sysctl tree: kern.geom.raid.* holds the class's run-time tunables. */
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
/* Non-zero enables tasting (probing) of on-disk metadata. */
int g_raid_enable = 1;
SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RWTUN,
    &g_raid_enable, 0, "Enable on-disk metadata taste");
/* Non-zero allows disks with no metadata to be picked up as spares. */
u_int g_raid_aggressive_spare = 0;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RWTUN,
    &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
61fe51d6c1SAlexander Motin u_int g_raid_debug = 0; 62af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid_debug, 0, 6389b17223SAlexander Motin "Debug level"); 6489b17223SAlexander Motin int g_raid_read_err_thresh = 10; 65af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RWTUN, 6689b17223SAlexander Motin &g_raid_read_err_thresh, 0, 6789b17223SAlexander Motin "Number of read errors equated to disk failure"); 6889b17223SAlexander Motin u_int g_raid_start_timeout = 30; 69af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RWTUN, 7089b17223SAlexander Motin &g_raid_start_timeout, 0, 7189b17223SAlexander Motin "Time to wait for all array components"); 7289b17223SAlexander Motin static u_int g_raid_clean_time = 5; 73af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RWTUN, 7489b17223SAlexander Motin &g_raid_clean_time, 0, "Mark volume as clean when idling"); 7589b17223SAlexander Motin static u_int g_raid_disconnect_on_failure = 1; 76af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 7789b17223SAlexander Motin &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 7889b17223SAlexander Motin static u_int g_raid_name_format = 0; 79af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RWTUN, 8089b17223SAlexander Motin &g_raid_name_format, 0, "Providers name format."); 8189b17223SAlexander Motin static u_int g_raid_idle_threshold = 1000000; 82af3b2549SHans Petter Selasky SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RWTUN, 8389b17223SAlexander Motin &g_raid_idle_threshold, 1000000, 8489b17223SAlexander Motin "Time in microseconds to consider a volume idle."); 85bd9fba0cSSean Bruno static u_int ar_legacy_aliases = 1; 86af3b2549SHans Petter Selasky SYSCTL_INT(_kern_geom_raid, OID_AUTO, 
legacy_aliases, CTLFLAG_RWTUN, 87bd9fba0cSSean Bruno &ar_legacy_aliases, 0, "Create aliases named as the legacy ataraid style."); 88bd9fba0cSSean Bruno 8989b17223SAlexander Motin 9089b17223SAlexander Motin #define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \ 9189b17223SAlexander Motin G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 9289b17223SAlexander Motin rv = msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 9389b17223SAlexander Motin G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 9489b17223SAlexander Motin } while (0) 9589b17223SAlexander Motin 9689b17223SAlexander Motin LIST_HEAD(, g_raid_md_class) g_raid_md_classes = 9789b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_md_classes); 9889b17223SAlexander Motin 9989b17223SAlexander Motin LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes = 10089b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_tr_classes); 10189b17223SAlexander Motin 10289b17223SAlexander Motin LIST_HEAD(, g_raid_volume) g_raid_volumes = 10389b17223SAlexander Motin LIST_HEAD_INITIALIZER(g_raid_volumes); 10489b17223SAlexander Motin 105a479c51bSAlexander Motin static eventhandler_tag g_raid_post_sync = NULL; 10689b17223SAlexander Motin static int g_raid_started = 0; 107a479c51bSAlexander Motin static int g_raid_shutdown = 0; 10889b17223SAlexander Motin 10989b17223SAlexander Motin static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp, 11089b17223SAlexander Motin struct g_geom *gp); 11189b17223SAlexander Motin static g_taste_t g_raid_taste; 11289b17223SAlexander Motin static void g_raid_init(struct g_class *mp); 11389b17223SAlexander Motin static void g_raid_fini(struct g_class *mp); 11489b17223SAlexander Motin 11589b17223SAlexander Motin struct g_class g_raid_class = { 11689b17223SAlexander Motin .name = G_RAID_CLASS_NAME, 11789b17223SAlexander Motin .version = G_VERSION, 11889b17223SAlexander Motin .ctlreq = g_raid_ctl, 11989b17223SAlexander Motin .taste = g_raid_taste, 
12089b17223SAlexander Motin .destroy_geom = g_raid_destroy_geom, 12189b17223SAlexander Motin .init = g_raid_init, 12289b17223SAlexander Motin .fini = g_raid_fini 12389b17223SAlexander Motin }; 12489b17223SAlexander Motin 12589b17223SAlexander Motin static void g_raid_destroy_provider(struct g_raid_volume *vol); 12689b17223SAlexander Motin static int g_raid_update_disk(struct g_raid_disk *disk, u_int event); 12789b17223SAlexander Motin static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event); 12889b17223SAlexander Motin static int g_raid_update_volume(struct g_raid_volume *vol, u_int event); 12989b17223SAlexander Motin static int g_raid_update_node(struct g_raid_softc *sc, u_int event); 13089b17223SAlexander Motin static void g_raid_dumpconf(struct sbuf *sb, const char *indent, 13189b17223SAlexander Motin struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 13289b17223SAlexander Motin static void g_raid_start(struct bio *bp); 13389b17223SAlexander Motin static void g_raid_start_request(struct bio *bp); 13489b17223SAlexander Motin static void g_raid_disk_done(struct bio *bp); 13589b17223SAlexander Motin static void g_raid_poll(struct g_raid_softc *sc); 13689b17223SAlexander Motin 13789b17223SAlexander Motin static const char * 13889b17223SAlexander Motin g_raid_node_event2str(int event) 13989b17223SAlexander Motin { 14089b17223SAlexander Motin 14189b17223SAlexander Motin switch (event) { 14289b17223SAlexander Motin case G_RAID_NODE_E_WAKE: 14389b17223SAlexander Motin return ("WAKE"); 14489b17223SAlexander Motin case G_RAID_NODE_E_START: 14589b17223SAlexander Motin return ("START"); 14689b17223SAlexander Motin default: 14789b17223SAlexander Motin return ("INVALID"); 14889b17223SAlexander Motin } 14989b17223SAlexander Motin } 15089b17223SAlexander Motin 15189b17223SAlexander Motin const char * 15289b17223SAlexander Motin g_raid_disk_state2str(int state) 15389b17223SAlexander Motin { 15489b17223SAlexander Motin 15589b17223SAlexander 
Motin switch (state) { 15689b17223SAlexander Motin case G_RAID_DISK_S_NONE: 15789b17223SAlexander Motin return ("NONE"); 15889b17223SAlexander Motin case G_RAID_DISK_S_OFFLINE: 15989b17223SAlexander Motin return ("OFFLINE"); 16026c538bcSAlexander Motin case G_RAID_DISK_S_DISABLED: 16126c538bcSAlexander Motin return ("DISABLED"); 16289b17223SAlexander Motin case G_RAID_DISK_S_FAILED: 16389b17223SAlexander Motin return ("FAILED"); 16489b17223SAlexander Motin case G_RAID_DISK_S_STALE_FAILED: 16589b17223SAlexander Motin return ("STALE_FAILED"); 16689b17223SAlexander Motin case G_RAID_DISK_S_SPARE: 16789b17223SAlexander Motin return ("SPARE"); 16889b17223SAlexander Motin case G_RAID_DISK_S_STALE: 16989b17223SAlexander Motin return ("STALE"); 17089b17223SAlexander Motin case G_RAID_DISK_S_ACTIVE: 17189b17223SAlexander Motin return ("ACTIVE"); 17289b17223SAlexander Motin default: 17389b17223SAlexander Motin return ("INVALID"); 17489b17223SAlexander Motin } 17589b17223SAlexander Motin } 17689b17223SAlexander Motin 17789b17223SAlexander Motin static const char * 17889b17223SAlexander Motin g_raid_disk_event2str(int event) 17989b17223SAlexander Motin { 18089b17223SAlexander Motin 18189b17223SAlexander Motin switch (event) { 18289b17223SAlexander Motin case G_RAID_DISK_E_DISCONNECTED: 18389b17223SAlexander Motin return ("DISCONNECTED"); 18489b17223SAlexander Motin default: 18589b17223SAlexander Motin return ("INVALID"); 18689b17223SAlexander Motin } 18789b17223SAlexander Motin } 18889b17223SAlexander Motin 18989b17223SAlexander Motin const char * 19089b17223SAlexander Motin g_raid_subdisk_state2str(int state) 19189b17223SAlexander Motin { 19289b17223SAlexander Motin 19389b17223SAlexander Motin switch (state) { 19489b17223SAlexander Motin case G_RAID_SUBDISK_S_NONE: 19589b17223SAlexander Motin return ("NONE"); 19689b17223SAlexander Motin case G_RAID_SUBDISK_S_FAILED: 19789b17223SAlexander Motin return ("FAILED"); 19889b17223SAlexander Motin case G_RAID_SUBDISK_S_NEW: 
19989b17223SAlexander Motin return ("NEW"); 20089b17223SAlexander Motin case G_RAID_SUBDISK_S_REBUILD: 20189b17223SAlexander Motin return ("REBUILD"); 20289b17223SAlexander Motin case G_RAID_SUBDISK_S_UNINITIALIZED: 20389b17223SAlexander Motin return ("UNINITIALIZED"); 20489b17223SAlexander Motin case G_RAID_SUBDISK_S_STALE: 20589b17223SAlexander Motin return ("STALE"); 20689b17223SAlexander Motin case G_RAID_SUBDISK_S_RESYNC: 20789b17223SAlexander Motin return ("RESYNC"); 20889b17223SAlexander Motin case G_RAID_SUBDISK_S_ACTIVE: 20989b17223SAlexander Motin return ("ACTIVE"); 21089b17223SAlexander Motin default: 21189b17223SAlexander Motin return ("INVALID"); 21289b17223SAlexander Motin } 21389b17223SAlexander Motin } 21489b17223SAlexander Motin 21589b17223SAlexander Motin static const char * 21689b17223SAlexander Motin g_raid_subdisk_event2str(int event) 21789b17223SAlexander Motin { 21889b17223SAlexander Motin 21989b17223SAlexander Motin switch (event) { 22089b17223SAlexander Motin case G_RAID_SUBDISK_E_NEW: 22189b17223SAlexander Motin return ("NEW"); 222d9d68496SAlexander Motin case G_RAID_SUBDISK_E_FAILED: 223d9d68496SAlexander Motin return ("FAILED"); 22489b17223SAlexander Motin case G_RAID_SUBDISK_E_DISCONNECTED: 22589b17223SAlexander Motin return ("DISCONNECTED"); 22689b17223SAlexander Motin default: 22789b17223SAlexander Motin return ("INVALID"); 22889b17223SAlexander Motin } 22989b17223SAlexander Motin } 23089b17223SAlexander Motin 23189b17223SAlexander Motin const char * 23289b17223SAlexander Motin g_raid_volume_state2str(int state) 23389b17223SAlexander Motin { 23489b17223SAlexander Motin 23589b17223SAlexander Motin switch (state) { 23689b17223SAlexander Motin case G_RAID_VOLUME_S_STARTING: 23789b17223SAlexander Motin return ("STARTING"); 23889b17223SAlexander Motin case G_RAID_VOLUME_S_BROKEN: 23989b17223SAlexander Motin return ("BROKEN"); 24089b17223SAlexander Motin case G_RAID_VOLUME_S_DEGRADED: 24189b17223SAlexander Motin return ("DEGRADED"); 
24289b17223SAlexander Motin case G_RAID_VOLUME_S_SUBOPTIMAL: 24389b17223SAlexander Motin return ("SUBOPTIMAL"); 24489b17223SAlexander Motin case G_RAID_VOLUME_S_OPTIMAL: 24589b17223SAlexander Motin return ("OPTIMAL"); 24689b17223SAlexander Motin case G_RAID_VOLUME_S_UNSUPPORTED: 24789b17223SAlexander Motin return ("UNSUPPORTED"); 24889b17223SAlexander Motin case G_RAID_VOLUME_S_STOPPED: 24989b17223SAlexander Motin return ("STOPPED"); 25089b17223SAlexander Motin default: 25189b17223SAlexander Motin return ("INVALID"); 25289b17223SAlexander Motin } 25389b17223SAlexander Motin } 25489b17223SAlexander Motin 25589b17223SAlexander Motin static const char * 25689b17223SAlexander Motin g_raid_volume_event2str(int event) 25789b17223SAlexander Motin { 25889b17223SAlexander Motin 25989b17223SAlexander Motin switch (event) { 26089b17223SAlexander Motin case G_RAID_VOLUME_E_UP: 26189b17223SAlexander Motin return ("UP"); 26289b17223SAlexander Motin case G_RAID_VOLUME_E_DOWN: 26389b17223SAlexander Motin return ("DOWN"); 26489b17223SAlexander Motin case G_RAID_VOLUME_E_START: 26589b17223SAlexander Motin return ("START"); 26689b17223SAlexander Motin case G_RAID_VOLUME_E_STARTMD: 26789b17223SAlexander Motin return ("STARTMD"); 26889b17223SAlexander Motin default: 26989b17223SAlexander Motin return ("INVALID"); 27089b17223SAlexander Motin } 27189b17223SAlexander Motin } 27289b17223SAlexander Motin 27389b17223SAlexander Motin const char * 27489b17223SAlexander Motin g_raid_volume_level2str(int level, int qual) 27589b17223SAlexander Motin { 27689b17223SAlexander Motin 27789b17223SAlexander Motin switch (level) { 27889b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID0: 27989b17223SAlexander Motin return ("RAID0"); 28089b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID1: 28189b17223SAlexander Motin return ("RAID1"); 28289b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID3: 283dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R3P0) 284dbb2e755SAlexander Motin return ("RAID3-P0"); 
285dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R3PN) 286dbb2e755SAlexander Motin return ("RAID3-PN"); 28789b17223SAlexander Motin return ("RAID3"); 28889b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID4: 289dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R4P0) 290bafd0b5bSAlexander Motin return ("RAID4-P0"); 291dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R4PN) 292bafd0b5bSAlexander Motin return ("RAID4-PN"); 29389b17223SAlexander Motin return ("RAID4"); 29489b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5: 295fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RA) 296dbb2e755SAlexander Motin return ("RAID5-RA"); 297fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RS) 298dbb2e755SAlexander Motin return ("RAID5-RS"); 299fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5LA) 300dbb2e755SAlexander Motin return ("RAID5-LA"); 301fc1de960SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5LS) 302dbb2e755SAlexander Motin return ("RAID5-LS"); 30389b17223SAlexander Motin return ("RAID5"); 30489b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID6: 305dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6RA) 306dbb2e755SAlexander Motin return ("RAID6-RA"); 307dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6RS) 308dbb2e755SAlexander Motin return ("RAID6-RS"); 309dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6LA) 310dbb2e755SAlexander Motin return ("RAID6-LA"); 311dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R6LS) 312dbb2e755SAlexander Motin return ("RAID6-LS"); 31389b17223SAlexander Motin return ("RAID6"); 314dbb2e755SAlexander Motin case G_RAID_VOLUME_RL_RAIDMDF: 315dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFRA) 316dbb2e755SAlexander Motin return ("RAIDMDF-RA"); 317dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFRS) 318dbb2e755SAlexander Motin return ("RAIDMDF-RS"); 319dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFLA) 320dbb2e755SAlexander Motin return 
("RAIDMDF-LA"); 321dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_RMDFLS) 322dbb2e755SAlexander Motin return ("RAIDMDF-LS"); 323dbb2e755SAlexander Motin return ("RAIDMDF"); 32489b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID1E: 325dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R1EA) 326dbb2e755SAlexander Motin return ("RAID1E-A"); 327dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R1EO) 328dbb2e755SAlexander Motin return ("RAID1E-O"); 32989b17223SAlexander Motin return ("RAID1E"); 33089b17223SAlexander Motin case G_RAID_VOLUME_RL_SINGLE: 33189b17223SAlexander Motin return ("SINGLE"); 33289b17223SAlexander Motin case G_RAID_VOLUME_RL_CONCAT: 33389b17223SAlexander Motin return ("CONCAT"); 33489b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5E: 335dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ERA) 336dbb2e755SAlexander Motin return ("RAID5E-RA"); 337dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ERS) 338dbb2e755SAlexander Motin return ("RAID5E-RS"); 339dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ELA) 340dbb2e755SAlexander Motin return ("RAID5E-LA"); 341dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5ELS) 342dbb2e755SAlexander Motin return ("RAID5E-LS"); 34389b17223SAlexander Motin return ("RAID5E"); 34489b17223SAlexander Motin case G_RAID_VOLUME_RL_RAID5EE: 345dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EERA) 346dbb2e755SAlexander Motin return ("RAID5EE-RA"); 347dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EERS) 348dbb2e755SAlexander Motin return ("RAID5EE-RS"); 349dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EELA) 350dbb2e755SAlexander Motin return ("RAID5EE-LA"); 351dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5EELS) 352dbb2e755SAlexander Motin return ("RAID5EE-LS"); 35389b17223SAlexander Motin return ("RAID5EE"); 354dbb2e755SAlexander Motin case G_RAID_VOLUME_RL_RAID5R: 355dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RRA) 
356dbb2e755SAlexander Motin return ("RAID5R-RA"); 357dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RRS) 358dbb2e755SAlexander Motin return ("RAID5R-RS"); 359dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RLA) 360dbb2e755SAlexander Motin return ("RAID5R-LA"); 361dbb2e755SAlexander Motin if (qual == G_RAID_VOLUME_RLQ_R5RLS) 362dbb2e755SAlexander Motin return ("RAID5R-LS"); 363dbb2e755SAlexander Motin return ("RAID5E"); 36489b17223SAlexander Motin default: 36589b17223SAlexander Motin return ("UNKNOWN"); 36689b17223SAlexander Motin } 36789b17223SAlexander Motin } 36889b17223SAlexander Motin 36989b17223SAlexander Motin int 37089b17223SAlexander Motin g_raid_volume_str2level(const char *str, int *level, int *qual) 37189b17223SAlexander Motin { 37289b17223SAlexander Motin 37389b17223SAlexander Motin *level = G_RAID_VOLUME_RL_UNKNOWN; 37489b17223SAlexander Motin *qual = G_RAID_VOLUME_RLQ_NONE; 37589b17223SAlexander Motin if (strcasecmp(str, "RAID0") == 0) 37689b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID0; 37789b17223SAlexander Motin else if (strcasecmp(str, "RAID1") == 0) 37889b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1; 379dbb2e755SAlexander Motin else if (strcasecmp(str, "RAID3-P0") == 0) { 38089b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID3; 381dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R3P0; 3824b97ff61SAlexander Motin } else if (strcasecmp(str, "RAID3-PN") == 0 || 383dbb2e755SAlexander Motin strcasecmp(str, "RAID3") == 0) { 384dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID3; 3854b97ff61SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R3PN; 386dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID4-P0") == 0) { 38789b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID4; 388dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R4P0; 3894b97ff61SAlexander Motin } else if (strcasecmp(str, "RAID4-PN") == 0 || 390dbb2e755SAlexander Motin strcasecmp(str, "RAID4") == 0) { 391dbb2e755SAlexander Motin *level = 
G_RAID_VOLUME_RL_RAID4; 3924b97ff61SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R4PN; 393dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-RA") == 0) { 39489b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 395fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RA; 396dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-RS") == 0) { 397fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 398fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RS; 399fc1de960SAlexander Motin } else if (strcasecmp(str, "RAID5") == 0 || 400dbb2e755SAlexander Motin strcasecmp(str, "RAID5-LA") == 0) { 401fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 402fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5LA; 403dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5-LS") == 0) { 404fc1de960SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5; 405fc1de960SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5LS; 406dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-RA") == 0) { 40789b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 408dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6RA; 409dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-RS") == 0) { 410dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 411dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6RS; 412dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6") == 0 || 413dbb2e755SAlexander Motin strcasecmp(str, "RAID6-LA") == 0) { 414dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 415dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6LA; 416dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID6-LS") == 0) { 417dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID6; 418dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R6LS; 419dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF-RA") == 0) { 420dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 421dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFRA; 422dbb2e755SAlexander 
Motin } else if (strcasecmp(str, "RAIDMDF-RS") == 0) { 423dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 424dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFRS; 425dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF") == 0 || 426dbb2e755SAlexander Motin strcasecmp(str, "RAIDMDF-LA") == 0) { 427dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 428dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFLA; 429dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAIDMDF-LS") == 0) { 430dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAIDMDF; 431dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_RMDFLS; 432dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID10") == 0 || 433dbb2e755SAlexander Motin strcasecmp(str, "RAID1E") == 0 || 434dbb2e755SAlexander Motin strcasecmp(str, "RAID1E-A") == 0) { 43589b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1E; 436dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R1EA; 437dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID1E-O") == 0) { 438dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID1E; 439dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R1EO; 440dbb2e755SAlexander Motin } else if (strcasecmp(str, "SINGLE") == 0) 44189b17223SAlexander Motin *level = G_RAID_VOLUME_RL_SINGLE; 44289b17223SAlexander Motin else if (strcasecmp(str, "CONCAT") == 0) 44389b17223SAlexander Motin *level = G_RAID_VOLUME_RL_CONCAT; 444dbb2e755SAlexander Motin else if (strcasecmp(str, "RAID5E-RA") == 0) { 44589b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 446dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ERA; 447dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E-RS") == 0) { 448dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 449dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ERS; 450dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E") == 0 || 451dbb2e755SAlexander Motin strcasecmp(str, "RAID5E-LA") == 0) { 452dbb2e755SAlexander Motin *level = 
G_RAID_VOLUME_RL_RAID5E; 453dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ELA; 454dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5E-LS") == 0) { 455dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5E; 456dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5ELS; 457dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-RA") == 0) { 45889b17223SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 459dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EERA; 460dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-RS") == 0) { 461dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 462dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EERS; 463dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE") == 0 || 464dbb2e755SAlexander Motin strcasecmp(str, "RAID5EE-LA") == 0) { 465dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 466dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EELA; 467dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5EE-LS") == 0) { 468dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5EE; 469dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5EELS; 470dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-RA") == 0) { 471dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 472dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RRA; 473dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-RS") == 0) { 474dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 475dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RRS; 476dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R") == 0 || 477dbb2e755SAlexander Motin strcasecmp(str, "RAID5R-LA") == 0) { 478dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 479dbb2e755SAlexander Motin *qual = G_RAID_VOLUME_RLQ_R5RLA; 480dbb2e755SAlexander Motin } else if (strcasecmp(str, "RAID5R-LS") == 0) { 481dbb2e755SAlexander Motin *level = G_RAID_VOLUME_RL_RAID5R; 482dbb2e755SAlexander Motin *qual = 
G_RAID_VOLUME_RLQ_R5RLS; 483dbb2e755SAlexander Motin } else 48489b17223SAlexander Motin return (-1); 48589b17223SAlexander Motin return (0); 48689b17223SAlexander Motin } 48789b17223SAlexander Motin 48889b17223SAlexander Motin const char * 48989b17223SAlexander Motin g_raid_get_diskname(struct g_raid_disk *disk) 49089b17223SAlexander Motin { 49189b17223SAlexander Motin 49289b17223SAlexander Motin if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 49389b17223SAlexander Motin return ("[unknown]"); 49489b17223SAlexander Motin return (disk->d_consumer->provider->name); 49589b17223SAlexander Motin } 49689b17223SAlexander Motin 49789b17223SAlexander Motin void 498609a7474SAlexander Motin g_raid_get_disk_info(struct g_raid_disk *disk) 499609a7474SAlexander Motin { 500609a7474SAlexander Motin struct g_consumer *cp = disk->d_consumer; 501609a7474SAlexander Motin int error, len; 502609a7474SAlexander Motin 503609a7474SAlexander Motin /* Read kernel dumping information. */ 504609a7474SAlexander Motin disk->d_kd.offset = 0; 505609a7474SAlexander Motin disk->d_kd.length = OFF_MAX; 506609a7474SAlexander Motin len = sizeof(disk->d_kd); 507609a7474SAlexander Motin error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd); 508609a7474SAlexander Motin if (error) 509609a7474SAlexander Motin disk->d_kd.di.dumper = NULL; 510609a7474SAlexander Motin if (disk->d_kd.di.dumper == NULL) 511609a7474SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, 512609a7474SAlexander Motin "Dumping not supported by %s: %d.", 513609a7474SAlexander Motin cp->provider->name, error); 514609a7474SAlexander Motin 515609a7474SAlexander Motin /* Read BIO_DELETE support. 
*/ 516609a7474SAlexander Motin error = g_getattr("GEOM::candelete", cp, &disk->d_candelete); 517609a7474SAlexander Motin if (error) 518609a7474SAlexander Motin disk->d_candelete = 0; 519609a7474SAlexander Motin if (!disk->d_candelete) 520609a7474SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, 521609a7474SAlexander Motin "BIO_DELETE not supported by %s: %d.", 522609a7474SAlexander Motin cp->provider->name, error); 523609a7474SAlexander Motin } 524609a7474SAlexander Motin 525609a7474SAlexander Motin void 52689b17223SAlexander Motin g_raid_report_disk_state(struct g_raid_disk *disk) 52789b17223SAlexander Motin { 52889b17223SAlexander Motin struct g_raid_subdisk *sd; 52989b17223SAlexander Motin int len, state; 53089b17223SAlexander Motin uint32_t s; 53189b17223SAlexander Motin 53289b17223SAlexander Motin if (disk->d_consumer == NULL) 53389b17223SAlexander Motin return; 53426c538bcSAlexander Motin if (disk->d_state == G_RAID_DISK_S_DISABLED) { 535b99586c2SAlexander Motin s = G_STATE_ACTIVE; /* XXX */ 53626c538bcSAlexander Motin } else if (disk->d_state == G_RAID_DISK_S_FAILED || 53789b17223SAlexander Motin disk->d_state == G_RAID_DISK_S_STALE_FAILED) { 53889b17223SAlexander Motin s = G_STATE_FAILED; 53989b17223SAlexander Motin } else { 54089b17223SAlexander Motin state = G_RAID_SUBDISK_S_ACTIVE; 54189b17223SAlexander Motin TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) { 54289b17223SAlexander Motin if (sd->sd_state < state) 54389b17223SAlexander Motin state = sd->sd_state; 54489b17223SAlexander Motin } 54589b17223SAlexander Motin if (state == G_RAID_SUBDISK_S_FAILED) 54689b17223SAlexander Motin s = G_STATE_FAILED; 54789b17223SAlexander Motin else if (state == G_RAID_SUBDISK_S_NEW || 54889b17223SAlexander Motin state == G_RAID_SUBDISK_S_REBUILD) 54989b17223SAlexander Motin s = G_STATE_REBUILD; 55089b17223SAlexander Motin else if (state == G_RAID_SUBDISK_S_STALE || 55189b17223SAlexander Motin state == G_RAID_SUBDISK_S_RESYNC) 55289b17223SAlexander Motin s = 
G_STATE_RESYNC; 55389b17223SAlexander Motin else 55489b17223SAlexander Motin s = G_STATE_ACTIVE; 55589b17223SAlexander Motin } 55689b17223SAlexander Motin len = sizeof(s); 55789b17223SAlexander Motin g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s); 55889b17223SAlexander Motin G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.", 55989b17223SAlexander Motin g_raid_get_diskname(disk), s); 56089b17223SAlexander Motin } 56189b17223SAlexander Motin 56289b17223SAlexander Motin void 56389b17223SAlexander Motin g_raid_change_disk_state(struct g_raid_disk *disk, int state) 56489b17223SAlexander Motin { 56589b17223SAlexander Motin 56689b17223SAlexander Motin G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.", 56789b17223SAlexander Motin g_raid_get_diskname(disk), 56889b17223SAlexander Motin g_raid_disk_state2str(disk->d_state), 56989b17223SAlexander Motin g_raid_disk_state2str(state)); 57089b17223SAlexander Motin disk->d_state = state; 57189b17223SAlexander Motin g_raid_report_disk_state(disk); 57289b17223SAlexander Motin } 57389b17223SAlexander Motin 57489b17223SAlexander Motin void 57589b17223SAlexander Motin g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state) 57689b17223SAlexander Motin { 57789b17223SAlexander Motin 57889b17223SAlexander Motin G_RAID_DEBUG1(0, sd->sd_softc, 57989b17223SAlexander Motin "Subdisk %s:%d-%s state changed from %s to %s.", 58089b17223SAlexander Motin sd->sd_volume->v_name, sd->sd_pos, 58189b17223SAlexander Motin sd->sd_disk ? 
g_raid_get_diskname(sd->sd_disk) : "[none]", 58289b17223SAlexander Motin g_raid_subdisk_state2str(sd->sd_state), 58389b17223SAlexander Motin g_raid_subdisk_state2str(state)); 58489b17223SAlexander Motin sd->sd_state = state; 58589b17223SAlexander Motin if (sd->sd_disk) 58689b17223SAlexander Motin g_raid_report_disk_state(sd->sd_disk); 58789b17223SAlexander Motin } 58889b17223SAlexander Motin 58989b17223SAlexander Motin void 59089b17223SAlexander Motin g_raid_change_volume_state(struct g_raid_volume *vol, int state) 59189b17223SAlexander Motin { 59289b17223SAlexander Motin 59389b17223SAlexander Motin G_RAID_DEBUG1(0, vol->v_softc, 59489b17223SAlexander Motin "Volume %s state changed from %s to %s.", 59589b17223SAlexander Motin vol->v_name, 59689b17223SAlexander Motin g_raid_volume_state2str(vol->v_state), 59789b17223SAlexander Motin g_raid_volume_state2str(state)); 59889b17223SAlexander Motin vol->v_state = state; 59989b17223SAlexander Motin } 60089b17223SAlexander Motin 60189b17223SAlexander Motin /* 60289b17223SAlexander Motin * --- Events handling functions --- 60389b17223SAlexander Motin * Events in geom_raid are used to maintain subdisks and volumes status 60489b17223SAlexander Motin * from one thread to simplify locking. 
60589b17223SAlexander Motin */ 60689b17223SAlexander Motin static void 60789b17223SAlexander Motin g_raid_event_free(struct g_raid_event *ep) 60889b17223SAlexander Motin { 60989b17223SAlexander Motin 61089b17223SAlexander Motin free(ep, M_RAID); 61189b17223SAlexander Motin } 61289b17223SAlexander Motin 61389b17223SAlexander Motin int 61489b17223SAlexander Motin g_raid_event_send(void *arg, int event, int flags) 61589b17223SAlexander Motin { 61689b17223SAlexander Motin struct g_raid_softc *sc; 61789b17223SAlexander Motin struct g_raid_event *ep; 61889b17223SAlexander Motin int error; 61989b17223SAlexander Motin 62089b17223SAlexander Motin if ((flags & G_RAID_EVENT_VOLUME) != 0) { 62189b17223SAlexander Motin sc = ((struct g_raid_volume *)arg)->v_softc; 62289b17223SAlexander Motin } else if ((flags & G_RAID_EVENT_DISK) != 0) { 62389b17223SAlexander Motin sc = ((struct g_raid_disk *)arg)->d_softc; 62489b17223SAlexander Motin } else if ((flags & G_RAID_EVENT_SUBDISK) != 0) { 62589b17223SAlexander Motin sc = ((struct g_raid_subdisk *)arg)->sd_softc; 62689b17223SAlexander Motin } else { 62789b17223SAlexander Motin sc = arg; 62889b17223SAlexander Motin } 62989b17223SAlexander Motin ep = malloc(sizeof(*ep), M_RAID, 63089b17223SAlexander Motin sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT); 63189b17223SAlexander Motin if (ep == NULL) 63289b17223SAlexander Motin return (ENOMEM); 63389b17223SAlexander Motin ep->e_tgt = arg; 63489b17223SAlexander Motin ep->e_event = event; 63589b17223SAlexander Motin ep->e_flags = flags; 63689b17223SAlexander Motin ep->e_error = 0; 63789b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Sending event %p. 
Waking up %p.", ep, sc); 63889b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 63989b17223SAlexander Motin TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 64089b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 64189b17223SAlexander Motin wakeup(sc); 64289b17223SAlexander Motin 64389b17223SAlexander Motin if ((flags & G_RAID_EVENT_WAIT) == 0) 64489b17223SAlexander Motin return (0); 64589b17223SAlexander Motin 64689b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 64789b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep); 64889b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 64989b17223SAlexander Motin while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) { 65089b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 65189b17223SAlexander Motin MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event", 65289b17223SAlexander Motin hz * 5); 65389b17223SAlexander Motin } 65489b17223SAlexander Motin error = ep->e_error; 65589b17223SAlexander Motin g_raid_event_free(ep); 65689b17223SAlexander Motin sx_xlock(&sc->sc_lock); 65789b17223SAlexander Motin return (error); 65889b17223SAlexander Motin } 65989b17223SAlexander Motin 66089b17223SAlexander Motin static void 66189b17223SAlexander Motin g_raid_event_cancel(struct g_raid_softc *sc, void *tgt) 66289b17223SAlexander Motin { 66389b17223SAlexander Motin struct g_raid_event *ep, *tmpep; 66489b17223SAlexander Motin 66589b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 66689b17223SAlexander Motin 66789b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 66889b17223SAlexander Motin TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 66989b17223SAlexander Motin if (ep->e_tgt != tgt) 67089b17223SAlexander Motin continue; 67189b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 67289b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) 67389b17223SAlexander Motin g_raid_event_free(ep); 67489b17223SAlexander Motin else { 67589b17223SAlexander Motin ep->e_error = 
ECANCELED; 67689b17223SAlexander Motin wakeup(ep); 67789b17223SAlexander Motin } 67889b17223SAlexander Motin } 67989b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 68089b17223SAlexander Motin } 68189b17223SAlexander Motin 68289b17223SAlexander Motin static int 68389b17223SAlexander Motin g_raid_event_check(struct g_raid_softc *sc, void *tgt) 68489b17223SAlexander Motin { 68589b17223SAlexander Motin struct g_raid_event *ep; 68689b17223SAlexander Motin int res = 0; 68789b17223SAlexander Motin 68889b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 68989b17223SAlexander Motin 69089b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 69189b17223SAlexander Motin TAILQ_FOREACH(ep, &sc->sc_events, e_next) { 69289b17223SAlexander Motin if (ep->e_tgt != tgt) 69389b17223SAlexander Motin continue; 69489b17223SAlexander Motin res = 1; 69589b17223SAlexander Motin break; 69689b17223SAlexander Motin } 69789b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 69889b17223SAlexander Motin return (res); 69989b17223SAlexander Motin } 70089b17223SAlexander Motin 70189b17223SAlexander Motin /* 70289b17223SAlexander Motin * Return the number of disks in given state. 70389b17223SAlexander Motin * If state is equal to -1, count all connected disks. 
70489b17223SAlexander Motin */ 70589b17223SAlexander Motin u_int 70689b17223SAlexander Motin g_raid_ndisks(struct g_raid_softc *sc, int state) 70789b17223SAlexander Motin { 70889b17223SAlexander Motin struct g_raid_disk *disk; 70989b17223SAlexander Motin u_int n; 71089b17223SAlexander Motin 71189b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 71289b17223SAlexander Motin 71389b17223SAlexander Motin n = 0; 71489b17223SAlexander Motin TAILQ_FOREACH(disk, &sc->sc_disks, d_next) { 71589b17223SAlexander Motin if (disk->d_state == state || state == -1) 71689b17223SAlexander Motin n++; 71789b17223SAlexander Motin } 71889b17223SAlexander Motin return (n); 71989b17223SAlexander Motin } 72089b17223SAlexander Motin 72189b17223SAlexander Motin /* 72289b17223SAlexander Motin * Return the number of subdisks in given state. 72389b17223SAlexander Motin * If state is equal to -1, count all connected disks. 72489b17223SAlexander Motin */ 72589b17223SAlexander Motin u_int 72689b17223SAlexander Motin g_raid_nsubdisks(struct g_raid_volume *vol, int state) 72789b17223SAlexander Motin { 72889b17223SAlexander Motin struct g_raid_subdisk *subdisk; 72989b17223SAlexander Motin struct g_raid_softc *sc; 73089b17223SAlexander Motin u_int i, n ; 73189b17223SAlexander Motin 73289b17223SAlexander Motin sc = vol->v_softc; 73389b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 73489b17223SAlexander Motin 73589b17223SAlexander Motin n = 0; 73689b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 73789b17223SAlexander Motin subdisk = &vol->v_subdisks[i]; 73889b17223SAlexander Motin if ((state == -1 && 73989b17223SAlexander Motin subdisk->sd_state != G_RAID_SUBDISK_S_NONE) || 74089b17223SAlexander Motin subdisk->sd_state == state) 74189b17223SAlexander Motin n++; 74289b17223SAlexander Motin } 74389b17223SAlexander Motin return (n); 74489b17223SAlexander Motin } 74589b17223SAlexander Motin 74689b17223SAlexander Motin /* 74789b17223SAlexander Motin * Return the first 
subdisk in given state. 74889b17223SAlexander Motin * If state is equal to -1, then the first connected disks. 74989b17223SAlexander Motin */ 75089b17223SAlexander Motin struct g_raid_subdisk * 75189b17223SAlexander Motin g_raid_get_subdisk(struct g_raid_volume *vol, int state) 75289b17223SAlexander Motin { 75389b17223SAlexander Motin struct g_raid_subdisk *sd; 75489b17223SAlexander Motin struct g_raid_softc *sc; 75589b17223SAlexander Motin u_int i; 75689b17223SAlexander Motin 75789b17223SAlexander Motin sc = vol->v_softc; 75889b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 75989b17223SAlexander Motin 76089b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 76189b17223SAlexander Motin sd = &vol->v_subdisks[i]; 76289b17223SAlexander Motin if ((state == -1 && 76389b17223SAlexander Motin sd->sd_state != G_RAID_SUBDISK_S_NONE) || 76489b17223SAlexander Motin sd->sd_state == state) 76589b17223SAlexander Motin return (sd); 76689b17223SAlexander Motin } 76789b17223SAlexander Motin return (NULL); 76889b17223SAlexander Motin } 76989b17223SAlexander Motin 77089b17223SAlexander Motin struct g_consumer * 77189b17223SAlexander Motin g_raid_open_consumer(struct g_raid_softc *sc, const char *name) 77289b17223SAlexander Motin { 77389b17223SAlexander Motin struct g_consumer *cp; 77489b17223SAlexander Motin struct g_provider *pp; 77589b17223SAlexander Motin 77689b17223SAlexander Motin g_topology_assert(); 77789b17223SAlexander Motin 77889b17223SAlexander Motin if (strncmp(name, "/dev/", 5) == 0) 77989b17223SAlexander Motin name += 5; 78089b17223SAlexander Motin pp = g_provider_by_name(name); 78189b17223SAlexander Motin if (pp == NULL) 78289b17223SAlexander Motin return (NULL); 78389b17223SAlexander Motin cp = g_new_consumer(sc->sc_geom); 78440ea77a0SAlexander Motin cp->flags |= G_CF_DIRECT_RECEIVE; 78589b17223SAlexander Motin if (g_attach(cp, pp) != 0) { 78689b17223SAlexander Motin g_destroy_consumer(cp); 78789b17223SAlexander Motin return (NULL); 
78889b17223SAlexander Motin } 78989b17223SAlexander Motin if (g_access(cp, 1, 1, 1) != 0) { 79089b17223SAlexander Motin g_detach(cp); 79189b17223SAlexander Motin g_destroy_consumer(cp); 79289b17223SAlexander Motin return (NULL); 79389b17223SAlexander Motin } 79489b17223SAlexander Motin return (cp); 79589b17223SAlexander Motin } 79689b17223SAlexander Motin 79789b17223SAlexander Motin static u_int 79889b17223SAlexander Motin g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp) 79989b17223SAlexander Motin { 80089b17223SAlexander Motin struct bio *bp; 80189b17223SAlexander Motin u_int nreqs = 0; 80289b17223SAlexander Motin 80389b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 80489b17223SAlexander Motin TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 80589b17223SAlexander Motin if (bp->bio_from == cp) 80689b17223SAlexander Motin nreqs++; 80789b17223SAlexander Motin } 80889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 80989b17223SAlexander Motin return (nreqs); 81089b17223SAlexander Motin } 81189b17223SAlexander Motin 81289b17223SAlexander Motin u_int 81389b17223SAlexander Motin g_raid_nopens(struct g_raid_softc *sc) 81489b17223SAlexander Motin { 81589b17223SAlexander Motin struct g_raid_volume *vol; 81689b17223SAlexander Motin u_int opens; 81789b17223SAlexander Motin 81889b17223SAlexander Motin opens = 0; 81989b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 82089b17223SAlexander Motin if (vol->v_provider_open != 0) 82189b17223SAlexander Motin opens++; 82289b17223SAlexander Motin } 82389b17223SAlexander Motin return (opens); 82489b17223SAlexander Motin } 82589b17223SAlexander Motin 82689b17223SAlexander Motin static int 82789b17223SAlexander Motin g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp) 82889b17223SAlexander Motin { 82989b17223SAlexander Motin 83089b17223SAlexander Motin if (cp->index > 0) { 83189b17223SAlexander Motin G_RAID_DEBUG1(2, sc, 83289b17223SAlexander Motin "I/O requests for %s 
exist, can't destroy it now.", 83389b17223SAlexander Motin cp->provider->name); 83489b17223SAlexander Motin return (1); 83589b17223SAlexander Motin } 83689b17223SAlexander Motin if (g_raid_nrequests(sc, cp) > 0) { 83789b17223SAlexander Motin G_RAID_DEBUG1(2, sc, 83889b17223SAlexander Motin "I/O requests for %s in queue, can't destroy it now.", 83989b17223SAlexander Motin cp->provider->name); 84089b17223SAlexander Motin return (1); 84189b17223SAlexander Motin } 84289b17223SAlexander Motin return (0); 84389b17223SAlexander Motin } 84489b17223SAlexander Motin 84589b17223SAlexander Motin static void 84689b17223SAlexander Motin g_raid_destroy_consumer(void *arg, int flags __unused) 84789b17223SAlexander Motin { 84889b17223SAlexander Motin struct g_consumer *cp; 84989b17223SAlexander Motin 85089b17223SAlexander Motin g_topology_assert(); 85189b17223SAlexander Motin 85289b17223SAlexander Motin cp = arg; 85389b17223SAlexander Motin G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 85489b17223SAlexander Motin g_detach(cp); 85589b17223SAlexander Motin g_destroy_consumer(cp); 85689b17223SAlexander Motin } 85789b17223SAlexander Motin 85889b17223SAlexander Motin void 85989b17223SAlexander Motin g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp) 86089b17223SAlexander Motin { 86189b17223SAlexander Motin struct g_provider *pp; 86289b17223SAlexander Motin int retaste_wait; 86389b17223SAlexander Motin 86489b17223SAlexander Motin g_topology_assert_not(); 86589b17223SAlexander Motin 86689b17223SAlexander Motin g_topology_lock(); 86789b17223SAlexander Motin cp->private = NULL; 86889b17223SAlexander Motin if (g_raid_consumer_is_busy(sc, cp)) 86989b17223SAlexander Motin goto out; 87089b17223SAlexander Motin pp = cp->provider; 87189b17223SAlexander Motin retaste_wait = 0; 87289b17223SAlexander Motin if (cp->acw == 1) { 87389b17223SAlexander Motin if ((pp->geom->flags & G_GEOM_WITHER) == 0) 87489b17223SAlexander Motin retaste_wait = 1; 87589b17223SAlexander 
Motin } 87689b17223SAlexander Motin if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 87789b17223SAlexander Motin g_access(cp, -cp->acr, -cp->acw, -cp->ace); 87889b17223SAlexander Motin if (retaste_wait) { 87989b17223SAlexander Motin /* 88089b17223SAlexander Motin * After retaste event was send (inside g_access()), we can send 88189b17223SAlexander Motin * event to detach and destroy consumer. 88289b17223SAlexander Motin * A class, which has consumer to the given provider connected 88389b17223SAlexander Motin * will not receive retaste event for the provider. 88489b17223SAlexander Motin * This is the way how I ignore retaste events when I close 88589b17223SAlexander Motin * consumers opened for write: I detach and destroy consumer 88689b17223SAlexander Motin * after retaste event is sent. 88789b17223SAlexander Motin */ 88889b17223SAlexander Motin g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL); 88989b17223SAlexander Motin goto out; 89089b17223SAlexander Motin } 89189b17223SAlexander Motin G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name); 89289b17223SAlexander Motin g_detach(cp); 89389b17223SAlexander Motin g_destroy_consumer(cp); 89489b17223SAlexander Motin out: 89589b17223SAlexander Motin g_topology_unlock(); 89689b17223SAlexander Motin } 89789b17223SAlexander Motin 89889b17223SAlexander Motin static void 89989b17223SAlexander Motin g_raid_orphan(struct g_consumer *cp) 90089b17223SAlexander Motin { 90189b17223SAlexander Motin struct g_raid_disk *disk; 90289b17223SAlexander Motin 90389b17223SAlexander Motin g_topology_assert(); 90489b17223SAlexander Motin 90589b17223SAlexander Motin disk = cp->private; 90689b17223SAlexander Motin if (disk == NULL) 90789b17223SAlexander Motin return; 90889b17223SAlexander Motin g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED, 90989b17223SAlexander Motin G_RAID_EVENT_DISK); 91089b17223SAlexander Motin } 91189b17223SAlexander Motin 912a479c51bSAlexander Motin static void 91389b17223SAlexander Motin g_raid_clean(struct 
g_raid_volume *vol, int acw) 91489b17223SAlexander Motin { 91589b17223SAlexander Motin struct g_raid_softc *sc; 91689b17223SAlexander Motin int timeout; 91789b17223SAlexander Motin 91889b17223SAlexander Motin sc = vol->v_softc; 91989b17223SAlexander Motin g_topology_assert_not(); 92089b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 92189b17223SAlexander Motin 92289b17223SAlexander Motin // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0) 923a479c51bSAlexander Motin // return; 92489b17223SAlexander Motin if (!vol->v_dirty) 925a479c51bSAlexander Motin return; 92689b17223SAlexander Motin if (vol->v_writes > 0) 927a479c51bSAlexander Motin return; 92889b17223SAlexander Motin if (acw > 0 || (acw == -1 && 92989b17223SAlexander Motin vol->v_provider != NULL && vol->v_provider->acw > 0)) { 93089b17223SAlexander Motin timeout = g_raid_clean_time - (time_uptime - vol->v_last_write); 931a479c51bSAlexander Motin if (!g_raid_shutdown && timeout > 0) 932a479c51bSAlexander Motin return; 93389b17223SAlexander Motin } 93489b17223SAlexander Motin vol->v_dirty = 0; 93589b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.", 93689b17223SAlexander Motin vol->v_name); 93789b17223SAlexander Motin g_raid_write_metadata(sc, vol, NULL, NULL); 93889b17223SAlexander Motin } 93989b17223SAlexander Motin 94089b17223SAlexander Motin static void 94189b17223SAlexander Motin g_raid_dirty(struct g_raid_volume *vol) 94289b17223SAlexander Motin { 94389b17223SAlexander Motin struct g_raid_softc *sc; 94489b17223SAlexander Motin 94589b17223SAlexander Motin sc = vol->v_softc; 94689b17223SAlexander Motin g_topology_assert_not(); 94789b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 94889b17223SAlexander Motin 94989b17223SAlexander Motin // if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0) 95089b17223SAlexander Motin // return; 95189b17223SAlexander Motin vol->v_dirty = 1; 95289b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.", 
95389b17223SAlexander Motin vol->v_name); 95489b17223SAlexander Motin g_raid_write_metadata(sc, vol, NULL, NULL); 95589b17223SAlexander Motin } 95689b17223SAlexander Motin 95789b17223SAlexander Motin void 95889b17223SAlexander Motin g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp) 95989b17223SAlexander Motin { 96089b17223SAlexander Motin struct g_raid_softc *sc; 96189b17223SAlexander Motin struct g_raid_volume *vol; 96289b17223SAlexander Motin struct g_raid_subdisk *sd; 96389b17223SAlexander Motin struct bio_queue_head queue; 96489b17223SAlexander Motin struct bio *cbp; 96589b17223SAlexander Motin int i; 96689b17223SAlexander Motin 96789b17223SAlexander Motin vol = tr->tro_volume; 96889b17223SAlexander Motin sc = vol->v_softc; 96989b17223SAlexander Motin 97089b17223SAlexander Motin /* 97189b17223SAlexander Motin * Allocate all bios before sending any request, so we can return 97289b17223SAlexander Motin * ENOMEM in nice and clean way. 97389b17223SAlexander Motin */ 97489b17223SAlexander Motin bioq_init(&queue); 97589b17223SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 97689b17223SAlexander Motin sd = &vol->v_subdisks[i]; 97789b17223SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_NONE || 97889b17223SAlexander Motin sd->sd_state == G_RAID_SUBDISK_S_FAILED) 97989b17223SAlexander Motin continue; 98089b17223SAlexander Motin cbp = g_clone_bio(bp); 98189b17223SAlexander Motin if (cbp == NULL) 98289b17223SAlexander Motin goto failure; 98389b17223SAlexander Motin cbp->bio_caller1 = sd; 98489b17223SAlexander Motin bioq_insert_tail(&queue, cbp); 98589b17223SAlexander Motin } 986b43560abSAlexander Motin while ((cbp = bioq_takefirst(&queue)) != NULL) { 98789b17223SAlexander Motin sd = cbp->bio_caller1; 98889b17223SAlexander Motin cbp->bio_caller1 = NULL; 98989b17223SAlexander Motin g_raid_subdisk_iostart(sd, cbp); 99089b17223SAlexander Motin } 99189b17223SAlexander Motin return; 99289b17223SAlexander Motin failure: 993b43560abSAlexander 
Motin while ((cbp = bioq_takefirst(&queue)) != NULL) 99489b17223SAlexander Motin g_destroy_bio(cbp); 99589b17223SAlexander Motin if (bp->bio_error == 0) 99689b17223SAlexander Motin bp->bio_error = ENOMEM; 99789b17223SAlexander Motin g_raid_iodone(bp, bp->bio_error); 99889b17223SAlexander Motin } 99989b17223SAlexander Motin 100089b17223SAlexander Motin static void 100189b17223SAlexander Motin g_raid_tr_kerneldump_common_done(struct bio *bp) 100289b17223SAlexander Motin { 100389b17223SAlexander Motin 100489b17223SAlexander Motin bp->bio_flags |= BIO_DONE; 100589b17223SAlexander Motin } 100689b17223SAlexander Motin 100789b17223SAlexander Motin int 100889b17223SAlexander Motin g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr, 100989b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 101089b17223SAlexander Motin { 101189b17223SAlexander Motin struct g_raid_softc *sc; 101289b17223SAlexander Motin struct g_raid_volume *vol; 101389b17223SAlexander Motin struct bio bp; 101489b17223SAlexander Motin 101589b17223SAlexander Motin vol = tr->tro_volume; 101689b17223SAlexander Motin sc = vol->v_softc; 101789b17223SAlexander Motin 101889b17223SAlexander Motin bzero(&bp, sizeof(bp)); 101989b17223SAlexander Motin bp.bio_cmd = BIO_WRITE; 102089b17223SAlexander Motin bp.bio_done = g_raid_tr_kerneldump_common_done; 102189b17223SAlexander Motin bp.bio_attribute = NULL; 102289b17223SAlexander Motin bp.bio_offset = offset; 102389b17223SAlexander Motin bp.bio_length = length; 102489b17223SAlexander Motin bp.bio_data = virtual; 102589b17223SAlexander Motin bp.bio_to = vol->v_provider; 102689b17223SAlexander Motin 102789b17223SAlexander Motin g_raid_start(&bp); 102889b17223SAlexander Motin while (!(bp.bio_flags & BIO_DONE)) { 102989b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Poll..."); 103089b17223SAlexander Motin g_raid_poll(sc); 103189b17223SAlexander Motin DELAY(10); 103289b17223SAlexander Motin } 103389b17223SAlexander Motin 
103489b17223SAlexander Motin return (bp.bio_error != 0 ? EIO : 0); 103589b17223SAlexander Motin } 103689b17223SAlexander Motin 103789b17223SAlexander Motin static int 103889b17223SAlexander Motin g_raid_dump(void *arg, 103989b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 104089b17223SAlexander Motin { 104189b17223SAlexander Motin struct g_raid_volume *vol; 104289b17223SAlexander Motin int error; 104389b17223SAlexander Motin 104489b17223SAlexander Motin vol = (struct g_raid_volume *)arg; 104589b17223SAlexander Motin G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.", 104689b17223SAlexander Motin (long long unsigned)offset, (long long unsigned)length); 104789b17223SAlexander Motin 104889b17223SAlexander Motin error = G_RAID_TR_KERNELDUMP(vol->v_tr, 104989b17223SAlexander Motin virtual, physical, offset, length); 105089b17223SAlexander Motin return (error); 105189b17223SAlexander Motin } 105289b17223SAlexander Motin 105389b17223SAlexander Motin static void 105489b17223SAlexander Motin g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp) 105589b17223SAlexander Motin { 105689b17223SAlexander Motin struct g_kerneldump *gkd; 105789b17223SAlexander Motin struct g_provider *pp; 105889b17223SAlexander Motin struct g_raid_volume *vol; 105989b17223SAlexander Motin 106089b17223SAlexander Motin gkd = (struct g_kerneldump*)bp->bio_data; 106189b17223SAlexander Motin pp = bp->bio_to; 106289b17223SAlexander Motin vol = pp->private; 106389b17223SAlexander Motin g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)", 106489b17223SAlexander Motin pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length); 106589b17223SAlexander Motin gkd->di.dumper = g_raid_dump; 106689b17223SAlexander Motin gkd->di.priv = vol; 106789b17223SAlexander Motin gkd->di.blocksize = vol->v_sectorsize; 106889b17223SAlexander Motin gkd->di.maxiosize = DFLTPHYS; 106989b17223SAlexander Motin gkd->di.mediaoffset = gkd->offset; 107089b17223SAlexander Motin 
if ((gkd->offset + gkd->length) > vol->v_mediasize) 107189b17223SAlexander Motin gkd->length = vol->v_mediasize - gkd->offset; 107289b17223SAlexander Motin gkd->di.mediasize = gkd->length; 107389b17223SAlexander Motin g_io_deliver(bp, 0); 107489b17223SAlexander Motin } 107589b17223SAlexander Motin 107689b17223SAlexander Motin static void 1077609a7474SAlexander Motin g_raid_candelete(struct g_raid_softc *sc, struct bio *bp) 1078609a7474SAlexander Motin { 1079609a7474SAlexander Motin struct g_provider *pp; 1080609a7474SAlexander Motin struct g_raid_volume *vol; 1081609a7474SAlexander Motin struct g_raid_subdisk *sd; 1082609a7474SAlexander Motin int *val; 1083609a7474SAlexander Motin int i; 1084609a7474SAlexander Motin 1085609a7474SAlexander Motin val = (int *)bp->bio_data; 1086609a7474SAlexander Motin pp = bp->bio_to; 1087609a7474SAlexander Motin vol = pp->private; 1088609a7474SAlexander Motin *val = 0; 1089609a7474SAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 1090609a7474SAlexander Motin sd = &vol->v_subdisks[i]; 1091609a7474SAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_NONE) 1092609a7474SAlexander Motin continue; 1093609a7474SAlexander Motin if (sd->sd_disk->d_candelete) { 1094609a7474SAlexander Motin *val = 1; 1095609a7474SAlexander Motin break; 1096609a7474SAlexander Motin } 1097609a7474SAlexander Motin } 1098609a7474SAlexander Motin g_io_deliver(bp, 0); 1099609a7474SAlexander Motin } 1100609a7474SAlexander Motin 1101609a7474SAlexander Motin static void 110289b17223SAlexander Motin g_raid_start(struct bio *bp) 110389b17223SAlexander Motin { 110489b17223SAlexander Motin struct g_raid_softc *sc; 110589b17223SAlexander Motin 110689b17223SAlexander Motin sc = bp->bio_to->geom->softc; 110789b17223SAlexander Motin /* 110889b17223SAlexander Motin * If sc == NULL or there are no valid disks, provider's error 110989b17223SAlexander Motin * should be set and g_raid_start() should not be called at all. 
111089b17223SAlexander Motin */ 111189b17223SAlexander Motin // KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING, 111289b17223SAlexander Motin // ("Provider's error should be set (error=%d)(mirror=%s).", 111389b17223SAlexander Motin // bp->bio_to->error, bp->bio_to->name)); 111489b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Request received."); 111589b17223SAlexander Motin 111689b17223SAlexander Motin switch (bp->bio_cmd) { 111789b17223SAlexander Motin case BIO_READ: 111889b17223SAlexander Motin case BIO_WRITE: 111989b17223SAlexander Motin case BIO_DELETE: 112089b17223SAlexander Motin case BIO_FLUSH: 112189b17223SAlexander Motin break; 112289b17223SAlexander Motin case BIO_GETATTR: 1123609a7474SAlexander Motin if (!strcmp(bp->bio_attribute, "GEOM::candelete")) 1124609a7474SAlexander Motin g_raid_candelete(sc, bp); 1125609a7474SAlexander Motin else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump")) 112689b17223SAlexander Motin g_raid_kerneldump(sc, bp); 112789b17223SAlexander Motin else 112889b17223SAlexander Motin g_io_deliver(bp, EOPNOTSUPP); 112989b17223SAlexander Motin return; 113089b17223SAlexander Motin default: 113189b17223SAlexander Motin g_io_deliver(bp, EOPNOTSUPP); 113289b17223SAlexander Motin return; 113389b17223SAlexander Motin } 113489b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 113589b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 113689b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 113789b17223SAlexander Motin if (!dumping) { 113889b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Waking up %p.", sc); 113989b17223SAlexander Motin wakeup(sc); 114089b17223SAlexander Motin } 114189b17223SAlexander Motin } 114289b17223SAlexander Motin 114389b17223SAlexander Motin static int 114489b17223SAlexander Motin g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len) 114589b17223SAlexander Motin { 114689b17223SAlexander Motin /* 114789b17223SAlexander Motin * 5 cases: 114889b17223SAlexander Motin * (1) bp entirely below 
NO 114989b17223SAlexander Motin * (2) bp entirely above NO 115089b17223SAlexander Motin * (3) bp start below, but end in range YES 115189b17223SAlexander Motin * (4) bp entirely within YES 115289b17223SAlexander Motin * (5) bp starts within, ends above YES 115389b17223SAlexander Motin * 115489b17223SAlexander Motin * lock range 10-19 (offset 10 length 10) 115589b17223SAlexander Motin * (1) 1-5: first if kicks it out 115689b17223SAlexander Motin * (2) 30-35: second if kicks it out 115789b17223SAlexander Motin * (3) 5-15: passes both ifs 115889b17223SAlexander Motin * (4) 12-14: passes both ifs 115989b17223SAlexander Motin * (5) 19-20: passes both 116089b17223SAlexander Motin */ 116189b17223SAlexander Motin off_t lend = lstart + len - 1; 116289b17223SAlexander Motin off_t bstart = bp->bio_offset; 116389b17223SAlexander Motin off_t bend = bp->bio_offset + bp->bio_length - 1; 116489b17223SAlexander Motin 116589b17223SAlexander Motin if (bend < lstart) 116689b17223SAlexander Motin return (0); 116789b17223SAlexander Motin if (lend < bstart) 116889b17223SAlexander Motin return (0); 116989b17223SAlexander Motin return (1); 117089b17223SAlexander Motin } 117189b17223SAlexander Motin 117289b17223SAlexander Motin static int 117389b17223SAlexander Motin g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp) 117489b17223SAlexander Motin { 117589b17223SAlexander Motin struct g_raid_lock *lp; 117689b17223SAlexander Motin 117789b17223SAlexander Motin sx_assert(&vol->v_softc->sc_lock, SX_LOCKED); 117889b17223SAlexander Motin 117989b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 118089b17223SAlexander Motin if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length)) 118189b17223SAlexander Motin return (1); 118289b17223SAlexander Motin } 118389b17223SAlexander Motin return (0); 118489b17223SAlexander Motin } 118589b17223SAlexander Motin 118689b17223SAlexander Motin static void 118789b17223SAlexander Motin g_raid_start_request(struct bio *bp) 
118889b17223SAlexander Motin { 118989b17223SAlexander Motin struct g_raid_softc *sc; 119089b17223SAlexander Motin struct g_raid_volume *vol; 119189b17223SAlexander Motin 119289b17223SAlexander Motin sc = bp->bio_to->geom->softc; 119389b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 119489b17223SAlexander Motin vol = bp->bio_to->private; 119589b17223SAlexander Motin 119689b17223SAlexander Motin /* 119789b17223SAlexander Motin * Check to see if this item is in a locked range. If so, 119889b17223SAlexander Motin * queue it to our locked queue and return. We'll requeue 119989b17223SAlexander Motin * it when the range is unlocked. Internal I/O for the 120089b17223SAlexander Motin * rebuild/rescan/recovery process is excluded from this 120189b17223SAlexander Motin * check so we can actually do the recovery. 120289b17223SAlexander Motin */ 120389b17223SAlexander Motin if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) && 120489b17223SAlexander Motin g_raid_is_in_locked_range(vol, bp)) { 120589b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Defer request."); 120689b17223SAlexander Motin bioq_insert_tail(&vol->v_locked, bp); 120789b17223SAlexander Motin return; 120889b17223SAlexander Motin } 120989b17223SAlexander Motin 121089b17223SAlexander Motin /* 121189b17223SAlexander Motin * If we're actually going to do the write/delete, then 121289b17223SAlexander Motin * update the idle stats for the volume. 121389b17223SAlexander Motin */ 121489b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) { 121589b17223SAlexander Motin if (!vol->v_dirty) 121689b17223SAlexander Motin g_raid_dirty(vol); 121789b17223SAlexander Motin vol->v_writes++; 121889b17223SAlexander Motin } 121989b17223SAlexander Motin 122089b17223SAlexander Motin /* 122189b17223SAlexander Motin * Put request onto inflight queue, so we can check if new 122289b17223SAlexander Motin * synchronization requests don't collide with it. 
Then tell 122389b17223SAlexander Motin * the transformation layer to start the I/O. 122489b17223SAlexander Motin */ 122589b17223SAlexander Motin bioq_insert_tail(&vol->v_inflight, bp); 122689b17223SAlexander Motin G_RAID_LOGREQ(4, bp, "Request started"); 122789b17223SAlexander Motin G_RAID_TR_IOSTART(vol->v_tr, bp); 122889b17223SAlexander Motin } 122989b17223SAlexander Motin 123089b17223SAlexander Motin static void 123189b17223SAlexander Motin g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp) 123289b17223SAlexander Motin { 123389b17223SAlexander Motin off_t off, len; 123489b17223SAlexander Motin struct bio *nbp; 123589b17223SAlexander Motin struct g_raid_lock *lp; 123689b17223SAlexander Motin 123789b17223SAlexander Motin vol->v_pending_lock = 0; 123889b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 123989b17223SAlexander Motin if (lp->l_pending) { 124089b17223SAlexander Motin off = lp->l_offset; 124189b17223SAlexander Motin len = lp->l_length; 124289b17223SAlexander Motin lp->l_pending = 0; 124389b17223SAlexander Motin TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) { 124489b17223SAlexander Motin if (g_raid_bio_overlaps(nbp, off, len)) 124589b17223SAlexander Motin lp->l_pending++; 124689b17223SAlexander Motin } 124789b17223SAlexander Motin if (lp->l_pending) { 124889b17223SAlexander Motin vol->v_pending_lock = 1; 124989b17223SAlexander Motin G_RAID_DEBUG1(4, vol->v_softc, 125089b17223SAlexander Motin "Deferred lock(%jd, %jd) has %d pending", 125189b17223SAlexander Motin (intmax_t)off, (intmax_t)(off + len), 125289b17223SAlexander Motin lp->l_pending); 125389b17223SAlexander Motin continue; 125489b17223SAlexander Motin } 125589b17223SAlexander Motin G_RAID_DEBUG1(4, vol->v_softc, 125689b17223SAlexander Motin "Deferred lock of %jd to %jd completed", 125789b17223SAlexander Motin (intmax_t)off, (intmax_t)(off + len)); 125889b17223SAlexander Motin G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg); 125989b17223SAlexander 
Motin } 126089b17223SAlexander Motin } 126189b17223SAlexander Motin } 126289b17223SAlexander Motin 126389b17223SAlexander Motin void 126489b17223SAlexander Motin g_raid_iodone(struct bio *bp, int error) 126589b17223SAlexander Motin { 126689b17223SAlexander Motin struct g_raid_softc *sc; 126789b17223SAlexander Motin struct g_raid_volume *vol; 126889b17223SAlexander Motin 126989b17223SAlexander Motin sc = bp->bio_to->geom->softc; 127089b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 127189b17223SAlexander Motin vol = bp->bio_to->private; 127289b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Request done: %d.", error); 127389b17223SAlexander Motin 127489b17223SAlexander Motin /* Update stats if we done write/delete. */ 127589b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) { 127689b17223SAlexander Motin vol->v_writes--; 127789b17223SAlexander Motin vol->v_last_write = time_uptime; 127889b17223SAlexander Motin } 127989b17223SAlexander Motin 128089b17223SAlexander Motin bioq_remove(&vol->v_inflight, bp); 128189b17223SAlexander Motin if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp)) 128289b17223SAlexander Motin g_raid_finish_with_locked_ranges(vol, bp); 128389b17223SAlexander Motin getmicrouptime(&vol->v_last_done); 128489b17223SAlexander Motin g_io_deliver(bp, error); 128589b17223SAlexander Motin } 128689b17223SAlexander Motin 128789b17223SAlexander Motin int 128889b17223SAlexander Motin g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len, 128989b17223SAlexander Motin struct bio *ignore, void *argp) 129089b17223SAlexander Motin { 129189b17223SAlexander Motin struct g_raid_softc *sc; 129289b17223SAlexander Motin struct g_raid_lock *lp; 129389b17223SAlexander Motin struct bio *bp; 129489b17223SAlexander Motin 129589b17223SAlexander Motin sc = vol->v_softc; 129689b17223SAlexander Motin lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO); 129789b17223SAlexander Motin LIST_INSERT_HEAD(&vol->v_locks, lp, 
l_next); 129889b17223SAlexander Motin lp->l_offset = off; 129989b17223SAlexander Motin lp->l_length = len; 130089b17223SAlexander Motin lp->l_callback_arg = argp; 130189b17223SAlexander Motin 130289b17223SAlexander Motin lp->l_pending = 0; 130389b17223SAlexander Motin TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) { 130489b17223SAlexander Motin if (bp != ignore && g_raid_bio_overlaps(bp, off, len)) 130589b17223SAlexander Motin lp->l_pending++; 130689b17223SAlexander Motin } 130789b17223SAlexander Motin 130889b17223SAlexander Motin /* 130989b17223SAlexander Motin * If there are any writes that are pending, we return EBUSY. All 131089b17223SAlexander Motin * callers will have to wait until all pending writes clear. 131189b17223SAlexander Motin */ 131289b17223SAlexander Motin if (lp->l_pending > 0) { 131389b17223SAlexander Motin vol->v_pending_lock = 1; 131489b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend", 131589b17223SAlexander Motin (intmax_t)off, (intmax_t)(off+len), lp->l_pending); 131689b17223SAlexander Motin return (EBUSY); 131789b17223SAlexander Motin } 131889b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd", 131989b17223SAlexander Motin (intmax_t)off, (intmax_t)(off+len)); 132089b17223SAlexander Motin G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg); 132189b17223SAlexander Motin return (0); 132289b17223SAlexander Motin } 132389b17223SAlexander Motin 132489b17223SAlexander Motin int 132589b17223SAlexander Motin g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len) 132689b17223SAlexander Motin { 132789b17223SAlexander Motin struct g_raid_lock *lp; 132889b17223SAlexander Motin struct g_raid_softc *sc; 132989b17223SAlexander Motin struct bio *bp; 133089b17223SAlexander Motin 133189b17223SAlexander Motin sc = vol->v_softc; 133289b17223SAlexander Motin LIST_FOREACH(lp, &vol->v_locks, l_next) { 133389b17223SAlexander Motin if (lp->l_offset == off && lp->l_length == len) { 
133489b17223SAlexander Motin LIST_REMOVE(lp, l_next); 133589b17223SAlexander Motin /* XXX 133689b17223SAlexander Motin * Right now we just put them all back on the queue 133789b17223SAlexander Motin * and hope for the best. We hope this because any 133889b17223SAlexander Motin * locked ranges will go right back on this list 133989b17223SAlexander Motin * when the worker thread runs. 134089b17223SAlexander Motin * XXX 134189b17223SAlexander Motin */ 134289b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd", 134389b17223SAlexander Motin (intmax_t)lp->l_offset, 134489b17223SAlexander Motin (intmax_t)(lp->l_offset+lp->l_length)); 134589b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 134689b17223SAlexander Motin while ((bp = bioq_takefirst(&vol->v_locked)) != NULL) 134789b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 134889b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 134989b17223SAlexander Motin free(lp, M_RAID); 135089b17223SAlexander Motin return (0); 135189b17223SAlexander Motin } 135289b17223SAlexander Motin } 135389b17223SAlexander Motin return (EINVAL); 135489b17223SAlexander Motin } 135589b17223SAlexander Motin 135689b17223SAlexander Motin void 135789b17223SAlexander Motin g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp) 135889b17223SAlexander Motin { 135989b17223SAlexander Motin struct g_consumer *cp; 136089b17223SAlexander Motin struct g_raid_disk *disk, *tdisk; 136189b17223SAlexander Motin 136289b17223SAlexander Motin bp->bio_caller1 = sd; 136389b17223SAlexander Motin 136489b17223SAlexander Motin /* 136589b17223SAlexander Motin * Make sure that the disk is present. Generally it is a task of 136689b17223SAlexander Motin * transformation layers to not send requests to absent disks, but 136789b17223SAlexander Motin * it is better to be safe and report situation then sorry. 
136889b17223SAlexander Motin */ 136989b17223SAlexander Motin if (sd->sd_disk == NULL) { 137089b17223SAlexander Motin G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!"); 137189b17223SAlexander Motin nodisk: 137289b17223SAlexander Motin bp->bio_from = NULL; 137389b17223SAlexander Motin bp->bio_to = NULL; 137489b17223SAlexander Motin bp->bio_error = ENXIO; 137589b17223SAlexander Motin g_raid_disk_done(bp); 137689b17223SAlexander Motin return; 137789b17223SAlexander Motin } 137889b17223SAlexander Motin disk = sd->sd_disk; 137989b17223SAlexander Motin if (disk->d_state != G_RAID_DISK_S_ACTIVE && 138089b17223SAlexander Motin disk->d_state != G_RAID_DISK_S_FAILED) { 138189b17223SAlexander Motin G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a " 138289b17223SAlexander Motin "wrong state (%s)!", g_raid_disk_state2str(disk->d_state)); 138389b17223SAlexander Motin goto nodisk; 138489b17223SAlexander Motin } 138589b17223SAlexander Motin 138689b17223SAlexander Motin cp = disk->d_consumer; 138789b17223SAlexander Motin bp->bio_from = cp; 138889b17223SAlexander Motin bp->bio_to = cp->provider; 138989b17223SAlexander Motin cp->index++; 139089b17223SAlexander Motin 139189b17223SAlexander Motin /* Update average disks load. 
*/ 139289b17223SAlexander Motin TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) { 139389b17223SAlexander Motin if (tdisk->d_consumer == NULL) 139489b17223SAlexander Motin tdisk->d_load = 0; 139589b17223SAlexander Motin else 139689b17223SAlexander Motin tdisk->d_load = (tdisk->d_consumer->index * 139789b17223SAlexander Motin G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8; 139889b17223SAlexander Motin } 139989b17223SAlexander Motin 140089b17223SAlexander Motin disk->d_last_offset = bp->bio_offset + bp->bio_length; 140189b17223SAlexander Motin if (dumping) { 140289b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Sending dumping request."); 140389b17223SAlexander Motin if (bp->bio_cmd == BIO_WRITE) { 140489b17223SAlexander Motin bp->bio_error = g_raid_subdisk_kerneldump(sd, 140589b17223SAlexander Motin bp->bio_data, 0, bp->bio_offset, bp->bio_length); 140689b17223SAlexander Motin } else 140789b17223SAlexander Motin bp->bio_error = EOPNOTSUPP; 140889b17223SAlexander Motin g_raid_disk_done(bp); 140989b17223SAlexander Motin } else { 141089b17223SAlexander Motin bp->bio_done = g_raid_disk_done; 141189b17223SAlexander Motin bp->bio_offset += sd->sd_offset; 141289b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Sending request."); 141389b17223SAlexander Motin g_io_request(bp, cp); 141489b17223SAlexander Motin } 141589b17223SAlexander Motin } 141689b17223SAlexander Motin 141789b17223SAlexander Motin int 141889b17223SAlexander Motin g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd, 141989b17223SAlexander Motin void *virtual, vm_offset_t physical, off_t offset, size_t length) 142089b17223SAlexander Motin { 142189b17223SAlexander Motin 142289b17223SAlexander Motin if (sd->sd_disk == NULL) 142389b17223SAlexander Motin return (ENXIO); 142489b17223SAlexander Motin if (sd->sd_disk->d_kd.di.dumper == NULL) 142589b17223SAlexander Motin return (EOPNOTSUPP); 142689b17223SAlexander Motin return (dump_write(&sd->sd_disk->d_kd.di, 142789b17223SAlexander Motin virtual, physical, 
142889b17223SAlexander Motin sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset, 142989b17223SAlexander Motin length)); 143089b17223SAlexander Motin } 143189b17223SAlexander Motin 143289b17223SAlexander Motin static void 143389b17223SAlexander Motin g_raid_disk_done(struct bio *bp) 143489b17223SAlexander Motin { 143589b17223SAlexander Motin struct g_raid_softc *sc; 143689b17223SAlexander Motin struct g_raid_subdisk *sd; 143789b17223SAlexander Motin 143889b17223SAlexander Motin sd = bp->bio_caller1; 143989b17223SAlexander Motin sc = sd->sd_softc; 144089b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 144189b17223SAlexander Motin bioq_disksort(&sc->sc_queue, bp); 144289b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 144389b17223SAlexander Motin if (!dumping) 144489b17223SAlexander Motin wakeup(sc); 144589b17223SAlexander Motin } 144689b17223SAlexander Motin 144789b17223SAlexander Motin static void 144889b17223SAlexander Motin g_raid_disk_done_request(struct bio *bp) 144989b17223SAlexander Motin { 145089b17223SAlexander Motin struct g_raid_softc *sc; 145189b17223SAlexander Motin struct g_raid_disk *disk; 145289b17223SAlexander Motin struct g_raid_subdisk *sd; 145389b17223SAlexander Motin struct g_raid_volume *vol; 145489b17223SAlexander Motin 145589b17223SAlexander Motin g_topology_assert_not(); 145689b17223SAlexander Motin 145789b17223SAlexander Motin G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error); 145889b17223SAlexander Motin sd = bp->bio_caller1; 145989b17223SAlexander Motin sc = sd->sd_softc; 146089b17223SAlexander Motin vol = sd->sd_volume; 146189b17223SAlexander Motin if (bp->bio_from != NULL) { 146289b17223SAlexander Motin bp->bio_from->index--; 146389b17223SAlexander Motin disk = bp->bio_from->private; 146489b17223SAlexander Motin if (disk == NULL) 146589b17223SAlexander Motin g_raid_kill_consumer(sc, bp->bio_from); 146689b17223SAlexander Motin } 146789b17223SAlexander Motin bp->bio_offset -= sd->sd_offset; 146889b17223SAlexander 
Motin 146989b17223SAlexander Motin G_RAID_TR_IODONE(vol->v_tr, sd, bp); 147089b17223SAlexander Motin } 147189b17223SAlexander Motin 147289b17223SAlexander Motin static void 147389b17223SAlexander Motin g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep) 147489b17223SAlexander Motin { 147589b17223SAlexander Motin 147689b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0) 147789b17223SAlexander Motin ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event); 147889b17223SAlexander Motin else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0) 147989b17223SAlexander Motin ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event); 148089b17223SAlexander Motin else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0) 148189b17223SAlexander Motin ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event); 148289b17223SAlexander Motin else 148389b17223SAlexander Motin ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event); 148489b17223SAlexander Motin if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) { 148589b17223SAlexander Motin KASSERT(ep->e_error == 0, 148689b17223SAlexander Motin ("Error cannot be handled.")); 148789b17223SAlexander Motin g_raid_event_free(ep); 148889b17223SAlexander Motin } else { 148989b17223SAlexander Motin ep->e_flags |= G_RAID_EVENT_DONE; 149089b17223SAlexander Motin G_RAID_DEBUG1(4, sc, "Waking up %p.", ep); 149189b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 149289b17223SAlexander Motin wakeup(ep); 149389b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 149489b17223SAlexander Motin } 149589b17223SAlexander Motin } 149689b17223SAlexander Motin 149789b17223SAlexander Motin /* 149889b17223SAlexander Motin * Worker thread. 
149989b17223SAlexander Motin */ 150089b17223SAlexander Motin static void 150189b17223SAlexander Motin g_raid_worker(void *arg) 150289b17223SAlexander Motin { 150389b17223SAlexander Motin struct g_raid_softc *sc; 150489b17223SAlexander Motin struct g_raid_event *ep; 150589b17223SAlexander Motin struct g_raid_volume *vol; 150689b17223SAlexander Motin struct bio *bp; 150789b17223SAlexander Motin struct timeval now, t; 150889b17223SAlexander Motin int timeout, rv; 150989b17223SAlexander Motin 151089b17223SAlexander Motin sc = arg; 151189b17223SAlexander Motin thread_lock(curthread); 151289b17223SAlexander Motin sched_prio(curthread, PRIBIO); 151389b17223SAlexander Motin thread_unlock(curthread); 151489b17223SAlexander Motin 151589b17223SAlexander Motin sx_xlock(&sc->sc_lock); 151689b17223SAlexander Motin for (;;) { 151789b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 151889b17223SAlexander Motin /* 151989b17223SAlexander Motin * First take a look at events. 152089b17223SAlexander Motin * This is important to handle events before any I/O requests. 
152189b17223SAlexander Motin */ 152289b17223SAlexander Motin bp = NULL; 152389b17223SAlexander Motin vol = NULL; 152489b17223SAlexander Motin rv = 0; 152589b17223SAlexander Motin ep = TAILQ_FIRST(&sc->sc_events); 152689b17223SAlexander Motin if (ep != NULL) 152789b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 152889b17223SAlexander Motin else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) 152989b17223SAlexander Motin ; 153089b17223SAlexander Motin else { 153189b17223SAlexander Motin getmicrouptime(&now); 153289b17223SAlexander Motin t = now; 153389b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 153489b17223SAlexander Motin if (bioq_first(&vol->v_inflight) == NULL && 153589b17223SAlexander Motin vol->v_tr && 153689b17223SAlexander Motin timevalcmp(&vol->v_last_done, &t, < )) 153789b17223SAlexander Motin t = vol->v_last_done; 153889b17223SAlexander Motin } 153989b17223SAlexander Motin timevalsub(&t, &now); 154089b17223SAlexander Motin timeout = g_raid_idle_threshold + 154189b17223SAlexander Motin t.tv_sec * 1000000 + t.tv_usec; 154289b17223SAlexander Motin if (timeout > 0) { 154389b17223SAlexander Motin /* 154489b17223SAlexander Motin * Two steps to avoid overflows at HZ=1000 154589b17223SAlexander Motin * and idle timeouts > 2.1s. Some rounding 154689b17223SAlexander Motin * errors can occur, but they are < 1tick, 154789b17223SAlexander Motin * which is deemed to be close enough for 154889b17223SAlexander Motin * this purpose. 
154989b17223SAlexander Motin */ 155089b17223SAlexander Motin int micpertic = 1000000 / hz; 155189b17223SAlexander Motin timeout = (timeout + micpertic - 1) / micpertic; 155289b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 155389b17223SAlexander Motin MSLEEP(rv, sc, &sc->sc_queue_mtx, 155489b17223SAlexander Motin PRIBIO | PDROP, "-", timeout); 155589b17223SAlexander Motin sx_xlock(&sc->sc_lock); 155689b17223SAlexander Motin goto process; 155789b17223SAlexander Motin } else 155889b17223SAlexander Motin rv = EWOULDBLOCK; 155989b17223SAlexander Motin } 156089b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 156189b17223SAlexander Motin process: 156289b17223SAlexander Motin if (ep != NULL) { 156389b17223SAlexander Motin g_raid_handle_event(sc, ep); 156489b17223SAlexander Motin } else if (bp != NULL) { 156589b17223SAlexander Motin if (bp->bio_to != NULL && 156689b17223SAlexander Motin bp->bio_to->geom == sc->sc_geom) 156789b17223SAlexander Motin g_raid_start_request(bp); 156889b17223SAlexander Motin else 156989b17223SAlexander Motin g_raid_disk_done_request(bp); 157089b17223SAlexander Motin } else if (rv == EWOULDBLOCK) { 157189b17223SAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) { 157289b17223SAlexander Motin g_raid_clean(vol, -1); 157389b17223SAlexander Motin if (bioq_first(&vol->v_inflight) == NULL && 157489b17223SAlexander Motin vol->v_tr) { 157589b17223SAlexander Motin t.tv_sec = g_raid_idle_threshold / 1000000; 157689b17223SAlexander Motin t.tv_usec = g_raid_idle_threshold % 1000000; 157789b17223SAlexander Motin timevaladd(&t, &vol->v_last_done); 157889b17223SAlexander Motin getmicrouptime(&now); 157989b17223SAlexander Motin if (timevalcmp(&t, &now, <= )) { 158089b17223SAlexander Motin G_RAID_TR_IDLE(vol->v_tr); 158189b17223SAlexander Motin vol->v_last_done = now; 158289b17223SAlexander Motin } 158389b17223SAlexander Motin } 158489b17223SAlexander Motin } 158589b17223SAlexander Motin } 158689b17223SAlexander Motin if (sc->sc_stopping == 
G_RAID_DESTROY_HARD) 158789b17223SAlexander Motin g_raid_destroy_node(sc, 1); /* May not return. */ 158889b17223SAlexander Motin } 158989b17223SAlexander Motin } 159089b17223SAlexander Motin 159189b17223SAlexander Motin static void 159289b17223SAlexander Motin g_raid_poll(struct g_raid_softc *sc) 159389b17223SAlexander Motin { 159489b17223SAlexander Motin struct g_raid_event *ep; 159589b17223SAlexander Motin struct bio *bp; 159689b17223SAlexander Motin 159789b17223SAlexander Motin sx_xlock(&sc->sc_lock); 159889b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 159989b17223SAlexander Motin /* 160089b17223SAlexander Motin * First take a look at events. 160189b17223SAlexander Motin * This is important to handle events before any I/O requests. 160289b17223SAlexander Motin */ 160389b17223SAlexander Motin ep = TAILQ_FIRST(&sc->sc_events); 160489b17223SAlexander Motin if (ep != NULL) { 160589b17223SAlexander Motin TAILQ_REMOVE(&sc->sc_events, ep, e_next); 160689b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 160789b17223SAlexander Motin g_raid_handle_event(sc, ep); 160889b17223SAlexander Motin goto out; 160989b17223SAlexander Motin } 161089b17223SAlexander Motin bp = bioq_takefirst(&sc->sc_queue); 161189b17223SAlexander Motin if (bp != NULL) { 161289b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 161389b17223SAlexander Motin if (bp->bio_from == NULL || 161489b17223SAlexander Motin bp->bio_from->geom != sc->sc_geom) 161589b17223SAlexander Motin g_raid_start_request(bp); 161689b17223SAlexander Motin else 161789b17223SAlexander Motin g_raid_disk_done_request(bp); 161889b17223SAlexander Motin } 161989b17223SAlexander Motin out: 162089b17223SAlexander Motin sx_xunlock(&sc->sc_lock); 162189b17223SAlexander Motin } 162289b17223SAlexander Motin 162389b17223SAlexander Motin static void 162489b17223SAlexander Motin g_raid_launch_provider(struct g_raid_volume *vol) 162589b17223SAlexander Motin { 162689b17223SAlexander Motin struct g_raid_disk *disk; 
1627b43560abSAlexander Motin struct g_raid_subdisk *sd; 162889b17223SAlexander Motin struct g_raid_softc *sc; 162989b17223SAlexander Motin struct g_provider *pp; 163089b17223SAlexander Motin char name[G_RAID_MAX_VOLUMENAME]; 1631bd9fba0cSSean Bruno char announce_buf[80], buf1[32]; 163289b17223SAlexander Motin off_t off; 1633b43560abSAlexander Motin int i; 163489b17223SAlexander Motin 163589b17223SAlexander Motin sc = vol->v_softc; 163689b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_LOCKED); 163789b17223SAlexander Motin 163889b17223SAlexander Motin g_topology_lock(); 163989b17223SAlexander Motin /* Try to name provider with volume name. */ 164089b17223SAlexander Motin snprintf(name, sizeof(name), "raid/%s", vol->v_name); 164189b17223SAlexander Motin if (g_raid_name_format == 0 || vol->v_name[0] == 0 || 164289b17223SAlexander Motin g_provider_by_name(name) != NULL) { 164389b17223SAlexander Motin /* Otherwise use sequential volume number. */ 164489b17223SAlexander Motin snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id); 164589b17223SAlexander Motin } 1646bd9fba0cSSean Bruno 1647bd9fba0cSSean Bruno /* 1648bd9fba0cSSean Bruno * Create a /dev/ar%d that the old ataraid(4) stack once 1649bd9fba0cSSean Bruno * created as an alias for /dev/raid/r%d if requested. 1650bd9fba0cSSean Bruno * This helps going from stable/7 ataraid devices to newer 1651bd9fba0cSSean Bruno * FreeBSD releases. 
sbruno 07 MAY 2013 1652bd9fba0cSSean Bruno */ 1653bd9fba0cSSean Bruno 1654bd9fba0cSSean Bruno if (ar_legacy_aliases) { 1655bd9fba0cSSean Bruno snprintf(announce_buf, sizeof(announce_buf), 1656bd9fba0cSSean Bruno "kern.devalias.%s", name); 1657bd9fba0cSSean Bruno snprintf(buf1, sizeof(buf1), 1658bd9fba0cSSean Bruno "ar%d", vol->v_global_id); 1659bd9fba0cSSean Bruno setenv(announce_buf, buf1); 1660bd9fba0cSSean Bruno } 1661bd9fba0cSSean Bruno 166289b17223SAlexander Motin pp = g_new_providerf(sc->sc_geom, "%s", name); 166340ea77a0SAlexander Motin pp->flags |= G_PF_DIRECT_RECEIVE; 1664b43560abSAlexander Motin if (vol->v_tr->tro_class->trc_accept_unmapped) { 1665b43560abSAlexander Motin pp->flags |= G_PF_ACCEPT_UNMAPPED; 1666b43560abSAlexander Motin for (i = 0; i < vol->v_disks_count; i++) { 1667b43560abSAlexander Motin sd = &vol->v_subdisks[i]; 1668b43560abSAlexander Motin if (sd->sd_state == G_RAID_SUBDISK_S_NONE) 1669b43560abSAlexander Motin continue; 1670b43560abSAlexander Motin if ((sd->sd_disk->d_consumer->provider->flags & 1671b43560abSAlexander Motin G_PF_ACCEPT_UNMAPPED) == 0) 1672b43560abSAlexander Motin pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 1673b43560abSAlexander Motin } 1674b43560abSAlexander Motin } 167589b17223SAlexander Motin pp->private = vol; 167689b17223SAlexander Motin pp->mediasize = vol->v_mediasize; 167789b17223SAlexander Motin pp->sectorsize = vol->v_sectorsize; 167889b17223SAlexander Motin pp->stripesize = 0; 167989b17223SAlexander Motin pp->stripeoffset = 0; 168089b17223SAlexander Motin if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 || 168189b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 || 168289b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE || 168389b17223SAlexander Motin vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) { 168489b17223SAlexander Motin if ((disk = vol->v_subdisks[0].sd_disk) != NULL && 168589b17223SAlexander Motin disk->d_consumer != NULL && 168689b17223SAlexander Motin 
disk->d_consumer->provider != NULL) { 168789b17223SAlexander Motin pp->stripesize = disk->d_consumer->provider->stripesize; 168889b17223SAlexander Motin off = disk->d_consumer->provider->stripeoffset; 168989b17223SAlexander Motin pp->stripeoffset = off + vol->v_subdisks[0].sd_offset; 169089b17223SAlexander Motin if (off > 0) 169189b17223SAlexander Motin pp->stripeoffset %= off; 169289b17223SAlexander Motin } 169389b17223SAlexander Motin if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) { 169489b17223SAlexander Motin pp->stripesize *= (vol->v_disks_count - 1); 169589b17223SAlexander Motin pp->stripeoffset *= (vol->v_disks_count - 1); 169689b17223SAlexander Motin } 169789b17223SAlexander Motin } else 169889b17223SAlexander Motin pp->stripesize = vol->v_strip_size; 169989b17223SAlexander Motin vol->v_provider = pp; 170089b17223SAlexander Motin g_error_provider(pp, 0); 170189b17223SAlexander Motin g_topology_unlock(); 170289b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.", 170389b17223SAlexander Motin pp->name, vol->v_name); 170489b17223SAlexander Motin } 170589b17223SAlexander Motin 170689b17223SAlexander Motin static void 170789b17223SAlexander Motin g_raid_destroy_provider(struct g_raid_volume *vol) 170889b17223SAlexander Motin { 170989b17223SAlexander Motin struct g_raid_softc *sc; 171089b17223SAlexander Motin struct g_provider *pp; 171189b17223SAlexander Motin struct bio *bp, *tmp; 171289b17223SAlexander Motin 171389b17223SAlexander Motin g_topology_assert_not(); 171489b17223SAlexander Motin sc = vol->v_softc; 171589b17223SAlexander Motin pp = vol->v_provider; 171689b17223SAlexander Motin KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name)); 171789b17223SAlexander Motin 171889b17223SAlexander Motin g_topology_lock(); 171989b17223SAlexander Motin g_error_provider(pp, ENXIO); 172089b17223SAlexander Motin mtx_lock(&sc->sc_queue_mtx); 172189b17223SAlexander Motin TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) { 
172289b17223SAlexander Motin if (bp->bio_to != pp) 172389b17223SAlexander Motin continue; 172489b17223SAlexander Motin bioq_remove(&sc->sc_queue, bp); 172589b17223SAlexander Motin g_io_deliver(bp, ENXIO); 172689b17223SAlexander Motin } 172789b17223SAlexander Motin mtx_unlock(&sc->sc_queue_mtx); 172889b17223SAlexander Motin G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.", 172989b17223SAlexander Motin pp->name, vol->v_name); 173089b17223SAlexander Motin g_wither_provider(pp, ENXIO); 173189b17223SAlexander Motin g_topology_unlock(); 173289b17223SAlexander Motin vol->v_provider = NULL; 173389b17223SAlexander Motin } 173489b17223SAlexander Motin 173589b17223SAlexander Motin /* 173689b17223SAlexander Motin * Update device state. 173789b17223SAlexander Motin */ 173889b17223SAlexander Motin static int 173989b17223SAlexander Motin g_raid_update_volume(struct g_raid_volume *vol, u_int event) 174089b17223SAlexander Motin { 174189b17223SAlexander Motin struct g_raid_softc *sc; 174289b17223SAlexander Motin 174389b17223SAlexander Motin sc = vol->v_softc; 174489b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 174589b17223SAlexander Motin 174689b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for volume %s.", 174789b17223SAlexander Motin g_raid_volume_event2str(event), 174889b17223SAlexander Motin vol->v_name); 174989b17223SAlexander Motin switch (event) { 175089b17223SAlexander Motin case G_RAID_VOLUME_E_DOWN: 175189b17223SAlexander Motin if (vol->v_provider != NULL) 175289b17223SAlexander Motin g_raid_destroy_provider(vol); 175389b17223SAlexander Motin break; 175489b17223SAlexander Motin case G_RAID_VOLUME_E_UP: 175589b17223SAlexander Motin if (vol->v_provider == NULL) 175689b17223SAlexander Motin g_raid_launch_provider(vol); 175789b17223SAlexander Motin break; 175889b17223SAlexander Motin case G_RAID_VOLUME_E_START: 175989b17223SAlexander Motin if (vol->v_tr) 176089b17223SAlexander Motin G_RAID_TR_START(vol->v_tr); 176189b17223SAlexander Motin return 
(0); 176289b17223SAlexander Motin default: 176389b17223SAlexander Motin if (sc->sc_md) 176489b17223SAlexander Motin G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event); 176589b17223SAlexander Motin return (0); 176689b17223SAlexander Motin } 176789b17223SAlexander Motin 176889b17223SAlexander Motin /* Manage root mount release. */ 176989b17223SAlexander Motin if (vol->v_starting) { 177089b17223SAlexander Motin vol->v_starting = 0; 177189b17223SAlexander Motin G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount); 177289b17223SAlexander Motin root_mount_rel(vol->v_rootmount); 177389b17223SAlexander Motin vol->v_rootmount = NULL; 177489b17223SAlexander Motin } 177589b17223SAlexander Motin if (vol->v_stopping && vol->v_provider_open == 0) 177689b17223SAlexander Motin g_raid_destroy_volume(vol); 177789b17223SAlexander Motin return (0); 177889b17223SAlexander Motin } 177989b17223SAlexander Motin 178089b17223SAlexander Motin /* 178189b17223SAlexander Motin * Update subdisk state. 178289b17223SAlexander Motin */ 178389b17223SAlexander Motin static int 178489b17223SAlexander Motin g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event) 178589b17223SAlexander Motin { 178689b17223SAlexander Motin struct g_raid_softc *sc; 178789b17223SAlexander Motin struct g_raid_volume *vol; 178889b17223SAlexander Motin 178989b17223SAlexander Motin sc = sd->sd_softc; 179089b17223SAlexander Motin vol = sd->sd_volume; 179189b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 179289b17223SAlexander Motin 179389b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.", 179489b17223SAlexander Motin g_raid_subdisk_event2str(event), 179589b17223SAlexander Motin vol->v_name, sd->sd_pos, 179689b17223SAlexander Motin sd->sd_disk ? 
g_raid_get_diskname(sd->sd_disk) : "[none]"); 179789b17223SAlexander Motin if (vol->v_tr) 179889b17223SAlexander Motin G_RAID_TR_EVENT(vol->v_tr, sd, event); 179989b17223SAlexander Motin 180089b17223SAlexander Motin return (0); 180189b17223SAlexander Motin } 180289b17223SAlexander Motin 180389b17223SAlexander Motin /* 180489b17223SAlexander Motin * Update disk state. 180589b17223SAlexander Motin */ 180689b17223SAlexander Motin static int 180789b17223SAlexander Motin g_raid_update_disk(struct g_raid_disk *disk, u_int event) 180889b17223SAlexander Motin { 180989b17223SAlexander Motin struct g_raid_softc *sc; 181089b17223SAlexander Motin 181189b17223SAlexander Motin sc = disk->d_softc; 181289b17223SAlexander Motin sx_assert(&sc->sc_lock, SX_XLOCKED); 181389b17223SAlexander Motin 181489b17223SAlexander Motin G_RAID_DEBUG1(2, sc, "Event %s for disk %s.", 181589b17223SAlexander Motin g_raid_disk_event2str(event), 181689b17223SAlexander Motin g_raid_get_diskname(disk)); 181789b17223SAlexander Motin 181889b17223SAlexander Motin if (sc->sc_md) 181989b17223SAlexander Motin G_RAID_MD_EVENT(sc->sc_md, disk, event); 182089b17223SAlexander Motin return (0); 182189b17223SAlexander Motin } 182289b17223SAlexander Motin 182389b17223SAlexander Motin /* 182489b17223SAlexander Motin * Node event. 
 */
/*
 * Handle an event addressed to the node (array) as a whole.  Called from
 * the worker thread with the softc sx lock held.  WAKE events exist only
 * to kick the worker loop and are consumed here; everything else is
 * forwarded to the metadata module, which owns node-level transitions.
 */
static int
g_raid_update_node(struct g_raid_softc *sc, u_int event)
{
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for the array.",
	    g_raid_node_event2str(event));

	if (event == G_RAID_NODE_E_WAKE)
		return (0);
	if (sc->sc_md)
		G_RAID_MD_EVENT(sc->sc_md, NULL, event);
	return (0);
}

/*
 * GEOM access method for volume providers.  Tracks the per-volume open
 * count and triggers delayed node/volume destruction when the last close
 * arrives.  Entered with the topology lock held; it is dropped while the
 * softc sx lock is taken and re-taken before returning (the sx lock may
 * not be acquired while holding the topology lock).
 */
static int
g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	int dcw, opens, error = 0;

	g_topology_assert();
	sc = pp->geom->softc;
	vol = pp->private;
	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
	KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));

	G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);
	/* Writer count after this request is applied. */
	dcw = pp->acw + acw;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	/* Deny new opens while dying. */
	if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
		error = ENXIO;
		goto out;
	}
	/* Deny write opens for read-only volumes. */
	if (vol->v_read_only && acw > 0) {
		error = EROFS;
		goto out;
	}
	/* Last writer is closing: let the volume be marked clean. */
	if (dcw == 0)
		g_raid_clean(vol, dcw);
	vol->v_provider_open += acr + acw + ace;
	/* Handle delayed node destruction. */
	if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
	    vol->v_provider_open == 0) {
		/* Count open volumes. */
		opens = g_raid_nopens(sc);
		if (opens == 0) {
			sc->sc_stopping = G_RAID_DESTROY_HARD;
			/* Wake up worker to make it selfdestruct. */
			g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
		}
	}
	/* Handle open volume destruction. */
	if (vol->v_stopping && vol->v_provider_open == 0)
		g_raid_destroy_volume(vol);
out:
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

/*
 * Allocate and initialize a new array (node): its geom, softc, queues and
 * per-array worker thread.  Called with the topology lock held.  Returns
 * NULL if the worker thread cannot be created; everything allocated here
 * is then torn down again.
 */
struct g_raid_softc *
g_raid_create_node(struct g_class *mp,
    const char *name, struct g_raid_md_object *md)
{
	struct g_raid_softc *sc;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	G_RAID_DEBUG(1, "Creating array %s.", name);

	gp = g_new_geomf(mp, "%s", name);
	sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
	gp->start = g_raid_start;
	gp->orphan = g_raid_orphan;
	gp->access = g_raid_access;
	gp->dumpconf = g_raid_dumpconf;

	sc->sc_md = md;
	sc->sc_geom = gp;
	sc->sc_flags = 0;
	TAILQ_INIT(&sc->sc_volumes);
	TAILQ_INIT(&sc->sc_disks);
	sx_init(&sc->sc_lock, "graid:lock");
	mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
	TAILQ_INIT(&sc->sc_events);
	bioq_init(&sc->sc_queue);
	gp->softc = sc;
	/* Worker thread that services the array's event and bio queues. */
	error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
	    "g_raid %s", name);
	if (error != 0) {
		G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_destroy(&sc->sc_lock);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_RAID);
		return (NULL);
	}

	G_RAID_DEBUG1(0, sc, "Array %s created.", name);
	return (sc);
}

/*
 * Allocate and initialize a new volume inside an array.  'id' requests a
 * specific global volume ID; pass a negative value to auto-allocate the
 * lowest free one.  A root-mount hold is taken so root mounting waits
 * until the volume comes up.
 */
struct g_raid_volume *
g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
{
	struct g_raid_volume *vol, *vol1;
	int i;

	G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
	vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
	vol->v_softc = sc;
	strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
	vol->v_state = G_RAID_VOLUME_S_STARTING;
	vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
	vol->v_rotate_parity = 1;
	bioq_init(&vol->v_inflight);
	bioq_init(&vol->v_locked);
	LIST_INIT(&vol->v_locks);
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		vol->v_subdisks[i].sd_softc = sc;
		vol->v_subdisks[i].sd_volume = vol;
		vol->v_subdisks[i].sd_pos = i;
		vol->v_subdisks[i].sd_state = G_RAID_DISK_S_NONE;
	}

	/* Find free ID for this volume. */
	g_topology_lock();
	vol1 = vol;
	if (id >= 0) {
		/* See whether the requested ID is already taken. */
		LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
			if (vol1->v_global_id == id)
				break;
		}
	}
	if (vol1 != NULL) {
		/* Requested ID busy (or none requested): take lowest free. */
		for (id = 0; ; id++) {
			LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
				if (vol1->v_global_id == id)
					break;
			}
			if (vol1 == NULL)
				break;
		}
	}
	vol->v_global_id = id;
	LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
	g_topology_unlock();

	/* Delay root mounting. */
	vol->v_rootmount = root_mount_hold("GRAID");
	G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
	vol->v_starting = 1;
	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
	return (vol);
}

/*
 * Allocate and initialize a new disk object within an array.
 */
struct g_raid_disk *
g_raid_create_disk(struct g_raid_softc *sc)
{
	struct g_raid_disk *disk;

	G_RAID_DEBUG1(1, sc, "Creating disk.");
	disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
	disk->d_softc = sc;
	disk->d_state = G_RAID_DISK_S_NONE;
	TAILQ_INIT(&disk->d_subdisks);
	TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
	return (disk);
}

/*
 * Choose and attach a transformation (RAID level) module for a volume by
 * letting each enabled TR class taste it in turn.  Returns 0 on success,
 * -1 if no module accepts the volume (volume is then marked UNSUPPORTED).
 */
int g_raid_start_volume(struct g_raid_volume *vol)
{
	struct g_raid_tr_class *class;
	struct g_raid_tr_object *obj;
	int status;

	G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
	LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
		if (!class->trc_enable)
			continue;
		G_RAID_DEBUG1(2, vol->v_softc,
		    "Tasting volume %s for %s transformation.",
		    vol->v_name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->tro_class = class;
		obj->tro_volume = vol;
		status = G_RAID_TR_TASTE(obj, vol);
		if (status != G_RAID_TR_TASTE_FAIL)
			break;
		/* This TR module declined the volume; dispose of its object. */
		kobj_delete((kobj_t)obj, M_RAID);
	}
	if (class == NULL) {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "No transformation module found for %s.",
		    vol->v_name);
		vol->v_tr = NULL;
		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
		g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		return (-1);
	}
	G_RAID_DEBUG1(2, vol->v_softc,
	    "Transformation module %s chosen for %s.",
	    class->name, vol->v_name);
	vol->v_tr = obj;
	return (0);
}

/*
 * Tear down a whole array: all volumes, all disks, the metadata object
 * and the geom.  Returns EBUSY if some volume or disk cannot be destroyed
 * yet (the caller is expected to retry).  If 'worker' is nonzero we are
 * running in the array's own worker thread, which frees the softc and
 * exits via kproc_exit() without returning.
 */
int
g_raid_destroy_node(struct g_raid_softc *sc, int worker)
{
	struct g_raid_volume *vol, *tmpv;
	struct g_raid_disk *disk, *tmpd;
	int error = 0;

	sc->sc_stopping = G_RAID_DESTROY_HARD;
	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
		if (g_raid_destroy_volume(vol))
			error = EBUSY;
	}
	if (error)
		return (error);
	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
		if (g_raid_destroy_disk(disk))
			error = EBUSY;
	}
	if (error)
		return (error);
	if (sc->sc_md) {
		G_RAID_MD_FREE(sc->sc_md);
		kobj_delete((kobj_t)sc->sc_md, M_RAID);
		sc->sc_md = NULL;
	}
	if (sc->sc_geom != NULL) {
		G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
		g_topology_lock();
		sc->sc_geom->softc = NULL;
		g_wither_geom(sc->sc_geom, ENXIO);
		g_topology_unlock();
		sc->sc_geom = NULL;
	} else
		G_RAID_DEBUG(1, "Array destroyed.");
	if (worker) {
		g_raid_event_cancel(sc, sc);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_xunlock(&sc->sc_lock);
		sx_destroy(&sc->sc_lock);
		/* Wake any thread sleeping on sc_stopping in g_raid_destroy(). */
		wakeup(&sc->sc_stopping);
		free(sc, M_RAID);
		curthread->td_pflags &= ~TDP_GEOM;
		G_RAID_DEBUG(1, "Thread exiting.");
		/* Worker thread terminates here; does not return. */
		kproc_exit(0);
	} else {
		/* Wake up worker to make it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
	return (0);
}

/*
 * Destroy one volume.  Returns EBUSY while the transformation module is
 * still stopping, events are pending, or the provider still exists or is
 * open — destruction is then retried later.  On success the volume is
 * unlinked from all lists and freed.
 */
int
g_raid_destroy_volume(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	int i;

	sc = vol->v_softc;
	G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
	vol->v_stopping = 1;
	if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
		if (vol->v_tr) {
			/* Ask the TR module to stop; retry once it has. */
			G_RAID_TR_STOP(vol->v_tr);
			return (EBUSY);
		} else
			vol->v_state = G_RAID_VOLUME_S_STOPPED;
	}
	if (g_raid_event_check(sc, vol) != 0)
		return (EBUSY);
	if (vol->v_provider != NULL)
		return (EBUSY);
	if (vol->v_provider_open != 0)
		return (EBUSY);
	if (vol->v_tr) {
		G_RAID_TR_FREE(vol->v_tr);
		kobj_delete((kobj_t)vol->v_tr, M_RAID);
		vol->v_tr = NULL;
	}
	if (vol->v_rootmount)
		root_mount_rel(vol->v_rootmount);
	g_topology_lock();
	LIST_REMOVE(vol, v_global_next);
	g_topology_unlock();
	TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
	/* Detach all subdisks from their disks and cancel their events. */
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		g_raid_event_cancel(sc, &vol->v_subdisks[i]);
		disk = vol->v_subdisks[i].sd_disk;
		if (disk == NULL)
			continue;
		TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
	}
	G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
	if (sc->sc_md)
		G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
	g_raid_event_cancel(sc, vol);
	free(vol, M_RAID);
	if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
		/* Wake up worker to let it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
	return (0);
}

/*
 * Destroy one disk: drop its consumer, disconnect all its subdisks and
 * free the disk object.  Always succeeds (returns 0).
 */
int
g_raid_destroy_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd, *tmp;

	sc = disk->d_softc;
	G_RAID_DEBUG1(2, sc, "Destroying disk.");
	if (disk->d_consumer) {
		g_raid_kill_consumer(sc, disk->d_consumer);
		disk->d_consumer = NULL;
	}
	TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
		    G_RAID_EVENT_SUBDISK);
		TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
		sd->sd_disk = NULL;
	}
	TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
	if (sc->sc_md)
		G_RAID_MD_FREE_DISK(sc->sc_md, disk);
	g_raid_event_cancel(sc, disk);
	free(disk, M_RAID);
	return (0);
}

/*
 * Request destruction of an array.  'how' selects the policy when
 * volumes are still open: SOFT refuses, DELAYED defers destruction to
 * the last close, HARD proceeds regardless.  Called with the softc sx
 * lock held; the lock is always released before returning (PDROP on the
 * sleep, explicit unlock on the early-return paths).
 */
int
g_raid_destroy(struct g_raid_softc *sc, int how)
{
	int error, opens;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	/* Count open volumes. */
	opens = g_raid_nopens(sc);

	/* React on some opened volumes. */
	if (opens > 0) {
		switch (how) {
		case G_RAID_DESTROY_SOFT:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
			sx_xunlock(&sc->sc_lock);
			return (EBUSY);
		case G_RAID_DESTROY_DELAYED:
			G_RAID_DEBUG1(1, sc,
			    "Array will be destroyed on last close.");
			sc->sc_stopping = G_RAID_DESTROY_DELAYED;
			sx_xunlock(&sc->sc_lock);
			return (EBUSY);
		case G_RAID_DESTROY_HARD:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
		}
	}

	/* Mark node for destruction. */
	sc->sc_stopping = G_RAID_DESTROY_HARD;
	/* Wake up worker to let it selfdestruct. */
	g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	/* Sleep until node destroyed; worker wakes us via sc_stopping. */
	error = sx_sleep(&sc->sc_stopping, &sc->sc_lock,
	    PRIBIO | PDROP, "r:destroy", hz * 3);
	return (error == EWOULDBLOCK ? EBUSY : 0);
}

/*
 * Orphan method for the short-lived taste consumer.  The consumer only
 * exists while the topology lock is held during tasting, so this should
 * never fire; treat a call as a bug.
 */
static void
g_raid_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

/*
 * GEOM taste method: probe a provider for on-disk RAID metadata by
 * letting every enabled metadata class taste it in turn.  Returns the
 * new geom created by the accepting metadata module, or NULL.
 */
static struct g_geom *
g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp, *geom;
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	if (!g_raid_enable)
		return (NULL);
	G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);

	geom = NULL;
	status = G_RAID_MD_TASTE_FAIL;
	/* Temporary geom/consumer used only for reading metadata. */
	gp = g_new_geomf(mp, "raid:taste");
	/*
	 * This orphan function should be never called.
	 */
	gp->orphan = g_raid_taste_orphan;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	g_attach(cp, pp);
	if (g_access(cp, 1, 0, 0) != 0)
		goto ofail;

	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		if (!class->mdc_enable)
			continue;
		G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
		    pp->name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->mdo_class = class;
		status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
		/* On TASTE_NEW the md object is kept by the new node. */
		if (status != G_RAID_MD_TASTE_NEW)
			kobj_delete((kobj_t)obj, M_RAID);
		if (status != G_RAID_MD_TASTE_FAIL)
			break;
	}

	if (status == G_RAID_MD_TASTE_FAIL)
		(void)g_access(cp, -1, 0, 0);
ofail:
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
	return (geom);
}

/*
 * Create a new array for the metadata format named by 'format' (matched
 * case-insensitively against registered metadata classes), driven by the
 * gctl request.  Returns a G_RAID_MD_TASTE_* status.
 */
int
g_raid_create_node_format(const char *format, struct gctl_req *req,
    struct g_geom **gp)
{
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		if (strcasecmp(class->name, format) == 0)
			break;
	}
	if (class == NULL) {
		G_RAID_DEBUG(1, "No support for %s metadata.", format);
		return (G_RAID_MD_TASTE_FAIL);
	}
	obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
	    M_WAITOK);
	obj->mdo_class = class;
	status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
	/* As in tasting: the object survives only on TASTE_NEW. */
	if (status != G_RAID_MD_TASTE_NEW)
		kobj_delete((kobj_t)obj, M_RAID);
	return (status);
}

/*
 * GEOM destroy_geom method: soft-destroy the array (refuses if any
 * volume is open).  Swaps the topology lock for the softc sx lock
 * around the destruction request.
 */
static int
g_raid_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_raid_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	/* g_raid_destroy() drops the sx lock on all paths. */
	error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
	g_topology_lock();
	return (error);
}

void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	/* Skip writes during final destruction; metadata updates are moot. */
	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return;
	if (sc->sc_md)
		G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
}

/*
 * Report a disk failure to the metadata module.  Either 'disk' or 'sd'
 * (whose sd_disk is then used) identifies the disk; requests against an
 * absent or non-ACTIVE disk are logged and ignored.
 */
void g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	if (disk == NULL)
		disk = sd->sd_disk;
	if (disk == NULL) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
		return;
	}
	if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a "
		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
		return;
	}
	if (sc->sc_md)
		G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
}

/*
 * GEOM dumpconf method: emit XML for `gconf`/`sysctl kern.geom.confxml`.
 * Three sections depending on what is being described: a provider
 * (volume), a consumer (disk), or the geom (whole array).  Each section
 * swaps the topology lock for the softc sx lock while reading state.
 */
static void
g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	int i, s;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Provider section: describe one volume. */
		vol = pp->private;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<descr>%s %s volume</descr>\n", indent,
		    sc->sc_md->mdo_class->name,
		    g_raid_volume_level2str(vol->v_raid_level,
		    vol->v_raid_level_qualifier));
		sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
		    vol->v_name);
		sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
		    g_raid_volume_level2str(vol->v_raid_level,
		    vol->v_raid_level_qualifier));
		sbuf_printf(sb,
		    "%s<Transformation>%s</Transformation>\n", indent,
		    vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    vol->v_disks_count);
		sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
		    vol->v_strip_size);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_raid_volume_state2str(vol->v_state));
		sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
		    vol->v_dirty ? "Yes" : "No");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_disk != NULL &&
			    sd->sd_disk->d_consumer != NULL) {
				sbuf_printf(sb, "%s ",
				    g_raid_get_diskname(sd->sd_disk));
			} else {
				sbuf_printf(sb, "NONE ");
			}
			sbuf_printf(sb, "(%s",
			    g_raid_subdisk_state2str(sd->sd_state));
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
			    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
				/* Append rebuild/resync progress percentage. */
				sbuf_printf(sb, " %d%%",
				    (int)(sd->sd_rebuild_pos * 100 /
				    sd->sd_size));
			}
			sbuf_printf(sb, ")");
			if (i + 1 < vol->v_disks_count)
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else if (cp != NULL) {
		/* Consumer section: describe one disk. */
		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<State>%s", indent,
		    g_raid_disk_state2str(disk->d_state));
		if (!TAILQ_EMPTY(&disk->d_subdisks)) {
			sbuf_printf(sb, " (");
			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
				sbuf_printf(sb, "%s",
				    g_raid_subdisk_state2str(sd->sd_state));
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
					sbuf_printf(sb, " %d%%",
					    (int)(sd->sd_rebuild_pos * 100 /
					    sd->sd_size));
				}
				if (TAILQ_NEXT(sd, sd_next))
					sbuf_printf(sb, ", ");
			}
			sbuf_printf(sb, ")");
		}
		sbuf_printf(sb, "</State>\n");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			sbuf_printf(sb, "r%d(%s):%d@%ju",
			    sd->sd_volume->v_global_id,
			    sd->sd_volume->v_name,
			    sd->sd_pos, sd->sd_offset);
			if (TAILQ_NEXT(sd, sd_next))
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
		    disk->d_read_errs);
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		/* Geom section: summarize the whole array. */
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		if (sc->sc_md) {
			sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
			    sc->sc_md->mdo_class->name);
		}
		if (!TAILQ_EMPTY(&sc->sc_volumes)) {
			/* Report the worst (lowest) volume state. */
			s = 0xff;
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (vol->v_state < s)
					s = vol->v_state;
			}
			sbuf_printf(sb, "%s<State>%s</State>\n", indent,
			    g_raid_volume_state2str(s));
		}
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}

/*
 * shutdown_post_sync event handler: mark all volumes clean and request
 * delayed destruction of every array so metadata reflects a clean stop.
 */
static void
g_raid_shutdown_post_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	g_raid_shutdown = 1;
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc)
== NULL) 249089b17223SAlexander Motin continue; 249189b17223SAlexander Motin g_topology_unlock(); 249289b17223SAlexander Motin sx_xlock(&sc->sc_lock); 2493a479c51bSAlexander Motin TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) 2494a479c51bSAlexander Motin g_raid_clean(vol, -1); 249589b17223SAlexander Motin g_cancel_event(sc); 24968531bb3fSAlexander Motin g_raid_destroy(sc, G_RAID_DESTROY_DELAYED); 249789b17223SAlexander Motin g_topology_lock(); 249889b17223SAlexander Motin } 249989b17223SAlexander Motin g_topology_unlock(); 250089b17223SAlexander Motin PICKUP_GIANT(); 250189b17223SAlexander Motin } 250289b17223SAlexander Motin 250389b17223SAlexander Motin static void 250489b17223SAlexander Motin g_raid_init(struct g_class *mp) 250589b17223SAlexander Motin { 250689b17223SAlexander Motin 2507a479c51bSAlexander Motin g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 2508a479c51bSAlexander Motin g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 2509a479c51bSAlexander Motin if (g_raid_post_sync == NULL) 251089b17223SAlexander Motin G_RAID_DEBUG(0, "Warning! 
Cannot register shutdown event."); 251189b17223SAlexander Motin g_raid_started = 1; 251289b17223SAlexander Motin } 251389b17223SAlexander Motin 251489b17223SAlexander Motin static void 251589b17223SAlexander Motin g_raid_fini(struct g_class *mp) 251689b17223SAlexander Motin { 251789b17223SAlexander Motin 2518a479c51bSAlexander Motin if (g_raid_post_sync != NULL) 2519a479c51bSAlexander Motin EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync); 252089b17223SAlexander Motin g_raid_started = 0; 252189b17223SAlexander Motin } 252289b17223SAlexander Motin 252389b17223SAlexander Motin int 252489b17223SAlexander Motin g_raid_md_modevent(module_t mod, int type, void *arg) 252589b17223SAlexander Motin { 252689b17223SAlexander Motin struct g_raid_md_class *class, *c, *nc; 252789b17223SAlexander Motin int error; 252889b17223SAlexander Motin 252989b17223SAlexander Motin error = 0; 253089b17223SAlexander Motin class = arg; 253189b17223SAlexander Motin switch (type) { 253289b17223SAlexander Motin case MOD_LOAD: 253389b17223SAlexander Motin c = LIST_FIRST(&g_raid_md_classes); 253489b17223SAlexander Motin if (c == NULL || c->mdc_priority > class->mdc_priority) 253589b17223SAlexander Motin LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list); 253689b17223SAlexander Motin else { 253789b17223SAlexander Motin while ((nc = LIST_NEXT(c, mdc_list)) != NULL && 253889b17223SAlexander Motin nc->mdc_priority < class->mdc_priority) 253989b17223SAlexander Motin c = nc; 254089b17223SAlexander Motin LIST_INSERT_AFTER(c, class, mdc_list); 254189b17223SAlexander Motin } 254289b17223SAlexander Motin if (g_raid_started) 254389b17223SAlexander Motin g_retaste(&g_raid_class); 254489b17223SAlexander Motin break; 254589b17223SAlexander Motin case MOD_UNLOAD: 254689b17223SAlexander Motin LIST_REMOVE(class, mdc_list); 254789b17223SAlexander Motin break; 254889b17223SAlexander Motin default: 254989b17223SAlexander Motin error = EOPNOTSUPP; 255089b17223SAlexander Motin break; 
255189b17223SAlexander Motin } 255289b17223SAlexander Motin 255389b17223SAlexander Motin return (error); 255489b17223SAlexander Motin } 255589b17223SAlexander Motin 255689b17223SAlexander Motin int 255789b17223SAlexander Motin g_raid_tr_modevent(module_t mod, int type, void *arg) 255889b17223SAlexander Motin { 255989b17223SAlexander Motin struct g_raid_tr_class *class, *c, *nc; 256089b17223SAlexander Motin int error; 256189b17223SAlexander Motin 256289b17223SAlexander Motin error = 0; 256389b17223SAlexander Motin class = arg; 256489b17223SAlexander Motin switch (type) { 256589b17223SAlexander Motin case MOD_LOAD: 256689b17223SAlexander Motin c = LIST_FIRST(&g_raid_tr_classes); 256789b17223SAlexander Motin if (c == NULL || c->trc_priority > class->trc_priority) 256889b17223SAlexander Motin LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list); 256989b17223SAlexander Motin else { 257089b17223SAlexander Motin while ((nc = LIST_NEXT(c, trc_list)) != NULL && 257189b17223SAlexander Motin nc->trc_priority < class->trc_priority) 257289b17223SAlexander Motin c = nc; 257389b17223SAlexander Motin LIST_INSERT_AFTER(c, class, trc_list); 257489b17223SAlexander Motin } 257589b17223SAlexander Motin break; 257689b17223SAlexander Motin case MOD_UNLOAD: 257789b17223SAlexander Motin LIST_REMOVE(class, trc_list); 257889b17223SAlexander Motin break; 257989b17223SAlexander Motin default: 258089b17223SAlexander Motin error = EOPNOTSUPP; 258189b17223SAlexander Motin break; 258289b17223SAlexander Motin } 258389b17223SAlexander Motin 258489b17223SAlexander Motin return (error); 258589b17223SAlexander Motin } 258689b17223SAlexander Motin 258789b17223SAlexander Motin /* 258889b17223SAlexander Motin * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid) 258989b17223SAlexander Motin * to reduce module priority, allowing submodules to register them first. 
259089b17223SAlexander Motin */ 259189b17223SAlexander Motin static moduledata_t g_raid_mod = { 259289b17223SAlexander Motin "g_raid", 259389b17223SAlexander Motin g_modevent, 259489b17223SAlexander Motin &g_raid_class 259589b17223SAlexander Motin }; 259689b17223SAlexander Motin DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD); 259789b17223SAlexander Motin MODULE_VERSION(geom_raid, 0); 2598