/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/raid/g_raid.h>
#include "g_raid_md_if.h"
#include "g_raid_tr_if.h"

static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_RAID stuff");
int g_raid_enable = 1;
SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RWTUN,
    &g_raid_enable, 0, "Enable on-disk metadata taste");
u_int g_raid_aggressive_spare = 0;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RWTUN,
    &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
u_int g_raid_debug = 0;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid_debug, 0,
    "Debug level");
int g_raid_read_err_thresh = 10;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RWTUN,
    &g_raid_read_err_thresh, 0,
    "Number of read errors equated to disk failure");
u_int g_raid_start_timeout = 30;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RWTUN,
    &g_raid_start_timeout, 0,
    "Time to wait for all array components");
static u_int g_raid_clean_time = 5;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RWTUN,
    &g_raid_clean_time, 0, "Mark volume as clean when idling");
static u_int g_raid_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid_name_format = 0;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RWTUN,
    &g_raid_name_format, 0, "Providers name format.");
static u_int g_raid_idle_threshold = 1000000;
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RWTUN,
    &g_raid_idle_threshold, 1000000,
    "Time in microseconds to consider a volume idle.");

#define	MSLEEP(rv, ident, mtx, priority, wmesg, timeout)	do {	\
	G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));		\
	rv = msleep((ident), (mtx), (priority), (wmesg), (timeout));	\
	G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident));		\
} while (0)

LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
    LIST_HEAD_INITIALIZER(g_raid_md_classes);

LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
    LIST_HEAD_INITIALIZER(g_raid_tr_classes);

LIST_HEAD(, g_raid_volume) g_raid_volumes =
    LIST_HEAD_INITIALIZER(g_raid_volumes);

static eventhandler_tag g_raid_post_sync = NULL;
static int g_raid_started = 0;
static int g_raid_shutdown = 0;

static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid_taste;
static void g_raid_init(struct g_class *mp);
static void g_raid_fini(struct g_class *mp);

struct g_class g_raid_class = {
	.name = G_RAID_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid_ctl,
	.taste = g_raid_taste,
	.destroy_geom = g_raid_destroy_geom,
	.init = g_raid_init,
	.fini = g_raid_fini
};

static void g_raid_destroy_provider(struct g_raid_volume *vol);
static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid_start(struct bio *bp);
static void g_raid_start_request(struct bio *bp);
static void g_raid_disk_done(struct bio *bp);
static void g_raid_poll(struct g_raid_softc *sc);

static const char *
g_raid_node_event2str(int event)
{

	switch (event) {
	case G_RAID_NODE_E_WAKE:
		return ("WAKE");
	case G_RAID_NODE_E_START:
		return ("START");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_disk_state2str(int state)
{

	switch (state) {
	case G_RAID_DISK_S_NONE:
		return ("NONE");
	case G_RAID_DISK_S_OFFLINE:
		return ("OFFLINE");
	case G_RAID_DISK_S_DISABLED:
		return ("DISABLED");
	case G_RAID_DISK_S_FAILED:
		return ("FAILED");
	case G_RAID_DISK_S_STALE_FAILED:
		return ("STALE_FAILED");
	case G_RAID_DISK_S_SPARE:
		return ("SPARE");
	case G_RAID_DISK_S_STALE:
		return ("STALE");
	case G_RAID_DISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_disk_event2str(int event)
{

	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_subdisk_state2str(int state)
{

	switch (state) {
	case G_RAID_SUBDISK_S_NONE:
		return ("NONE");
	case G_RAID_SUBDISK_S_FAILED:
		return ("FAILED");
	case G_RAID_SUBDISK_S_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_S_REBUILD:
		return ("REBUILD");
	case G_RAID_SUBDISK_S_UNINITIALIZED:
		return ("UNINITIALIZED");
	case G_RAID_SUBDISK_S_STALE:
		return ("STALE");
	case G_RAID_SUBDISK_S_RESYNC:
		return ("RESYNC");
	case G_RAID_SUBDISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_subdisk_event2str(int event)
{

	switch (event) {
	case G_RAID_SUBDISK_E_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_E_FAILED:
		return ("FAILED");
	case G_RAID_SUBDISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_volume_state2str(int state)
{

	switch (state) {
	case G_RAID_VOLUME_S_STARTING:
		return ("STARTING");
	case G_RAID_VOLUME_S_BROKEN:
		return ("BROKEN");
	case G_RAID_VOLUME_S_DEGRADED:
		return ("DEGRADED");
	case G_RAID_VOLUME_S_SUBOPTIMAL:
		return ("SUBOPTIMAL");
	case G_RAID_VOLUME_S_OPTIMAL:
		return ("OPTIMAL");
	case G_RAID_VOLUME_S_UNSUPPORTED:
		return ("UNSUPPORTED");
	case G_RAID_VOLUME_S_STOPPED:
		return ("STOPPED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_volume_event2str(int event)
{

	switch (event) {
	case G_RAID_VOLUME_E_UP:
		return ("UP");
	case G_RAID_VOLUME_E_DOWN:
		return ("DOWN");
	case G_RAID_VOLUME_E_START:
		return ("START");
	case G_RAID_VOLUME_E_STARTMD:
		return ("STARTMD");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_volume_level2str(int level, int qual)
{

	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		return ("RAID0");
	case G_RAID_VOLUME_RL_RAID1:
		return ("RAID1");
	case G_RAID_VOLUME_RL_RAID3:
		if (qual == G_RAID_VOLUME_RLQ_R3P0)
			return ("RAID3-P0");
		if (qual == G_RAID_VOLUME_RLQ_R3PN)
			return ("RAID3-PN");
		return ("RAID3");
	case G_RAID_VOLUME_RL_RAID4:
		if (qual == G_RAID_VOLUME_RLQ_R4P0)
			return ("RAID4-P0");
		if (qual == G_RAID_VOLUME_RLQ_R4PN)
			return ("RAID4-PN");
		return ("RAID4");
	case G_RAID_VOLUME_RL_RAID5:
		if (qual == G_RAID_VOLUME_RLQ_R5RA)
			return ("RAID5-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5RS)
			return ("RAID5-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5LA)
			return ("RAID5-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5LS)
			return ("RAID5-LS");
		return ("RAID5");
	case G_RAID_VOLUME_RL_RAID6:
		if (qual == G_RAID_VOLUME_RLQ_R6RA)
			return ("RAID6-RA");
		if (qual == G_RAID_VOLUME_RLQ_R6RS)
			return ("RAID6-RS");
		if (qual == G_RAID_VOLUME_RLQ_R6LA)
			return ("RAID6-LA");
		if (qual == G_RAID_VOLUME_RLQ_R6LS)
			return ("RAID6-LS");
		return ("RAID6");
	case G_RAID_VOLUME_RL_RAIDMDF:
		if (qual == G_RAID_VOLUME_RLQ_RMDFRA)
			return ("RAIDMDF-RA");
		if (qual == G_RAID_VOLUME_RLQ_RMDFRS)
			return ("RAIDMDF-RS");
		if (qual == G_RAID_VOLUME_RLQ_RMDFLA)
			return ("RAIDMDF-LA");
		if (qual == G_RAID_VOLUME_RLQ_RMDFLS)
			return ("RAIDMDF-LS");
		return ("RAIDMDF");
	case G_RAID_VOLUME_RL_RAID1E:
		if (qual == G_RAID_VOLUME_RLQ_R1EA)
			return ("RAID1E-A");
		if (qual == G_RAID_VOLUME_RLQ_R1EO)
			return ("RAID1E-O");
		return ("RAID1E");
	case G_RAID_VOLUME_RL_SINGLE:
		return ("SINGLE");
	case G_RAID_VOLUME_RL_CONCAT:
		return ("CONCAT");
	case G_RAID_VOLUME_RL_RAID5E:
		if (qual == G_RAID_VOLUME_RLQ_R5ERA)
			return ("RAID5E-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5ERS)
			return ("RAID5E-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5ELA)
			return ("RAID5E-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5ELS)
			return ("RAID5E-LS");
		return ("RAID5E");
	case G_RAID_VOLUME_RL_RAID5EE:
		if (qual == G_RAID_VOLUME_RLQ_R5EERA)
			return ("RAID5EE-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5EERS)
			return ("RAID5EE-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5EELA)
			return ("RAID5EE-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5EELS)
			return ("RAID5EE-LS");
		return ("RAID5EE");
	case G_RAID_VOLUME_RL_RAID5R:
		if (qual == G_RAID_VOLUME_RLQ_R5RRA)
			return ("RAID5R-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5RRS)
			return ("RAID5R-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5RLA)
			return ("RAID5R-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5RLS)
			return ("RAID5R-LS");
		return ("RAID5R");
	default:
		return ("UNKNOWN");
	}
}

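/*
 * Parse a RAID level name with an optional layout qualifier (e.g. "RAID5-LS")
 * into level and qualifier constants.  Returns -1 if the name is unknown.
 */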
int
g_raid_volume_str2level(const char *str, int *level, int *qual)
{

	*level = G_RAID_VOLUME_RL_UNKNOWN;
	*qual = G_RAID_VOLUME_RLQ_NONE;
	if (strcasecmp(str, "RAID0") == 0)
		*level = G_RAID_VOLUME_RL_RAID0;
	else if (strcasecmp(str, "RAID1") == 0)
		*level = G_RAID_VOLUME_RL_RAID1;
	else if (strcasecmp(str, "RAID3-P0") == 0) {
		*level = G_RAID_VOLUME_RL_RAID3;
		*qual = G_RAID_VOLUME_RLQ_R3P0;
	} else if (strcasecmp(str, "RAID3-PN") == 0 ||
		   strcasecmp(str, "RAID3") == 0) {
		*level = G_RAID_VOLUME_RL_RAID3;
		*qual = G_RAID_VOLUME_RLQ_R3PN;
	} else if (strcasecmp(str, "RAID4-P0") == 0) {
		*level = G_RAID_VOLUME_RL_RAID4;
		*qual = G_RAID_VOLUME_RLQ_R4P0;
	} else if (strcasecmp(str, "RAID4-PN") == 0 ||
		   strcasecmp(str, "RAID4") == 0) {
		*level = G_RAID_VOLUME_RL_RAID4;
		*qual = G_RAID_VOLUME_RLQ_R4PN;
	} else if (strcasecmp(str, "RAID5-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5RA;
	} else if (strcasecmp(str, "RAID5-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5RS;
	} else if (strcasecmp(str, "RAID5") == 0 ||
		   strcasecmp(str, "RAID5-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5LA;
	} else if (strcasecmp(str, "RAID5-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5LS;
	} else if (strcasecmp(str, "RAID6-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6RA;
	} else if (strcasecmp(str, "RAID6-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6RS;
	} else if (strcasecmp(str, "RAID6") == 0 ||
		   strcasecmp(str, "RAID6-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6LA;
	} else if (strcasecmp(str, "RAID6-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6LS;
	} else if (strcasecmp(str, "RAIDMDF-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFRA;
	} else if (strcasecmp(str, "RAIDMDF-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFRS;
	} else if (strcasecmp(str, "RAIDMDF") == 0 ||
		   strcasecmp(str, "RAIDMDF-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFLA;
	} else if (strcasecmp(str, "RAIDMDF-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFLS;
	} else if (strcasecmp(str, "RAID10") == 0 ||
		   strcasecmp(str, "RAID1E") == 0 ||
		   strcasecmp(str, "RAID1E-A") == 0) {
		*level = G_RAID_VOLUME_RL_RAID1E;
		*qual = G_RAID_VOLUME_RLQ_R1EA;
	} else if (strcasecmp(str, "RAID1E-O") == 0) {
		*level = G_RAID_VOLUME_RL_RAID1E;
		*qual = G_RAID_VOLUME_RLQ_R1EO;
	} else if (strcasecmp(str, "SINGLE") == 0)
		*level = G_RAID_VOLUME_RL_SINGLE;
	else if (strcasecmp(str, "CONCAT") == 0)
		*level = G_RAID_VOLUME_RL_CONCAT;
	else if (strcasecmp(str, "RAID5E-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ERA;
	} else if (strcasecmp(str, "RAID5E-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ERS;
	} else if (strcasecmp(str, "RAID5E") == 0 ||
		   strcasecmp(str, "RAID5E-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ELA;
	} else if (strcasecmp(str, "RAID5E-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ELS;
	} else if (strcasecmp(str, "RAID5EE-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EERA;
	} else if (strcasecmp(str, "RAID5EE-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EERS;
	} else if (strcasecmp(str, "RAID5EE") == 0 ||
		   strcasecmp(str, "RAID5EE-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EELA;
	} else if (strcasecmp(str, "RAID5EE-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EELS;
	} else if (strcasecmp(str, "RAID5R-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RRA;
	} else if (strcasecmp(str, "RAID5R-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RRS;
	} else if (strcasecmp(str, "RAID5R") == 0 ||
		   strcasecmp(str, "RAID5R-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RLA;
	} else if (strcasecmp(str, "RAID5R-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RLS;
	} else
		return (-1);
	return (0);
}

const char *
g_raid_get_diskname(struct g_raid_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_consumer->provider->name);
}

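/*
 * Query the disk's consumer for kernel dump support and BIO_DELETE
 * capability, caching the results in the disk structure.
 */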
void
g_raid_get_disk_info(struct g_raid_disk *disk)
{
	struct g_consumer *cp = disk->d_consumer;
	int error, len;

	/* Read kernel dumping information. */
	disk->d_kd.offset = 0;
	disk->d_kd.length = OFF_MAX;
	len = sizeof(disk->d_kd);
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
	if (error)
		disk->d_kd.di.dumper = NULL;
	if (disk->d_kd.di.dumper == NULL)
		G_RAID_DEBUG1(2, disk->d_softc,
		    "Dumping not supported by %s: %d.",
		    cp->provider->name, error);

	/* Read BIO_DELETE support. */
	error = g_getattr("GEOM::candelete", cp, &disk->d_candelete);
	if (error)
		disk->d_candelete = 0;
	if (!disk->d_candelete)
		G_RAID_DEBUG1(2, disk->d_softc,
		    "BIO_DELETE not supported by %s: %d.",
		    cp->provider->name, error);
}

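/*
 * Translate the disk's state (or the worst state of its subdisks) into a
 * generic GEOM state and report it to the consumer via GEOM::setstate.
 */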
void
g_raid_report_disk_state(struct g_raid_disk *disk)
{
	struct g_raid_subdisk *sd;
	int len, state;
	uint32_t s;

	if (disk->d_consumer == NULL)
		return;
	if (disk->d_state == G_RAID_DISK_S_DISABLED) {
		s = G_STATE_ACTIVE; /* XXX */
	} else if (disk->d_state == G_RAID_DISK_S_FAILED ||
	    disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
		s = G_STATE_FAILED;
	} else {
		state = G_RAID_SUBDISK_S_ACTIVE;
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			if (sd->sd_state < state)
				state = sd->sd_state;
		}
		if (state == G_RAID_SUBDISK_S_FAILED)
			s = G_STATE_FAILED;
		else if (state == G_RAID_SUBDISK_S_NEW ||
		    state == G_RAID_SUBDISK_S_REBUILD)
			s = G_STATE_REBUILD;
		else if (state == G_RAID_SUBDISK_S_STALE ||
		    state == G_RAID_SUBDISK_S_RESYNC)
			s = G_STATE_RESYNC;
		else
			s = G_STATE_ACTIVE;
	}
	len = sizeof(s);
	g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
	G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
	    g_raid_get_diskname(disk), s);
}

void
g_raid_change_disk_state(struct g_raid_disk *disk, int state)
{

	G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
	    g_raid_get_diskname(disk),
	    g_raid_disk_state2str(disk->d_state),
	    g_raid_disk_state2str(state));
	disk->d_state = state;
	g_raid_report_disk_state(disk);
}

void
g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
{

	G_RAID_DEBUG1(0, sd->sd_softc,
	    "Subdisk %s:%d-%s state changed from %s to %s.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
	    g_raid_subdisk_state2str(sd->sd_state),
	    g_raid_subdisk_state2str(state));
	sd->sd_state = state;
	if (sd->sd_disk)
		g_raid_report_disk_state(sd->sd_disk);
}

void
g_raid_change_volume_state(struct g_raid_volume *vol, int state)
{

	G_RAID_DEBUG1(0, vol->v_softc,
	    "Volume %s state changed from %s to %s.",
	    vol->v_name,
	    g_raid_volume_state2str(vol->v_state),
	    g_raid_volume_state2str(state));
	vol->v_state = state;
}

/*
 * --- Events handling functions ---
 * Events in geom_raid are used to maintain the status of subdisks and
 * volumes from a single thread, which simplifies locking.
 */
static void
g_raid_event_free(struct g_raid_event *ep)
{

	free(ep, M_RAID);
}

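/*
 * Queue an event for the worker thread and wake it up.  If G_RAID_EVENT_WAIT
 * is set, sleep until the worker has processed the event and return its
 * error status.
 */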
int
g_raid_event_send(void *arg, int event, int flags)
{
	struct g_raid_softc *sc;
	struct g_raid_event *ep;
	int error;

	if ((flags & G_RAID_EVENT_VOLUME) != 0) {
		sc = ((struct g_raid_volume *)arg)->v_softc;
	} else if ((flags & G_RAID_EVENT_DISK) != 0) {
		sc = ((struct g_raid_disk *)arg)->d_softc;
	} else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
		sc = ((struct g_raid_subdisk *)arg)->sd_softc;
	} else {
		sc = arg;
	}
	ep = malloc(sizeof(*ep), M_RAID,
	    sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->e_tgt = arg;
	ep->e_event = event;
	ep->e_flags = flags;
	ep->e_error = 0;
	G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);

	if ((flags & G_RAID_EVENT_WAIT) == 0)
		return (0);

	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_queue_mtx);
		MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

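/*
 * Remove all pending events for the given target.  Events that have a waiter
 * are completed with ECANCELED instead of being freed.
 */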
static void
g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep, *tmpep;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if (ep->e_tgt != tgt)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
			g_raid_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_queue_mtx);
}

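/* Return non-zero if there is an event queued for the given target. */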
static int
g_raid_event_check(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep;
	int	res = 0;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
		if (ep->e_tgt != tgt)
			continue;
		res = 1;
		break;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (res);
}

/*
 * Return the number of disks in given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid_ndisks(struct g_raid_softc *sc, int state)
{
	struct g_raid_disk *disk;
	u_int n;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == state || state == -1)
			n++;
	}
	return (n);
}

/*
 * Return the number of subdisks in given state.
 * If state is equal to -1, count all connected subdisks.
 */
u_int
g_raid_nsubdisks(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *subdisk;
	struct g_raid_softc *sc __diagused;
	u_int i, n;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		subdisk = &vol->v_subdisks[i];
		if ((state == -1 &&
		     subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    subdisk->sd_state == state)
			n++;
	}
	return (n);
}

/*
 * Return the first subdisk in given state.
 * If state is equal to -1, return the first connected subdisk.
 */
struct g_raid_subdisk *
g_raid_get_subdisk(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *sd;
	struct g_raid_softc *sc __diagused;
	u_int i;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if ((state == -1 &&
		     sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    sd->sd_state == state)
			return (sd);
	}
	return (NULL);
}

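/*
 * Attach a new consumer to the named provider and open it with read, write
 * and exclusive access.  Returns NULL on failure.
 */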
struct g_consumer *
g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	g_topology_assert();

	if (strncmp(name, _PATH_DEV, 5) == 0)
		name += 5;
	pp = g_provider_by_name(name);
	if (pp == NULL)
		return (NULL);
	cp = g_new_consumer(sc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	if (g_attach(cp, pp) != 0) {
		g_destroy_consumer(cp);
		return (NULL);
	}
	if (g_access(cp, 1, 1, 1) != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		return (NULL);
	}
	return (cp);
}

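/* Count queued I/O requests received from the given consumer. */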
static u_int
g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

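/* Count volumes of the node whose providers are currently open. */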
u_int
g_raid_nopens(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	u_int opens;

	opens = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_provider_open != 0)
			opens++;
	}
	return (opens);
}

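/* Check whether the consumer still has I/O requests in flight or queued. */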
static int
g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid_nrequests(sc, cp) > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

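/*
 * Close the consumer and destroy it unless it is still busy.  If the consumer
 * was open for writing, its destruction is deferred until after the retaste
 * event triggered by closing it has been delivered.
 */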
void
g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert_not();

	g_topology_lock();
	cp->private = NULL;
	if (g_raid_consumer_is_busy(sc, cp))
		goto out;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can post an event to detach and destroy the consumer.
		 * A class that already has a consumer attached to the given
		 * provider will not receive a retaste event for it.  This is
		 * how retaste events are ignored when closing consumers that
		 * were opened for writing: the consumer is detached and
		 * destroyed only after the retaste event has been sent.
		 */
		g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
		goto out;
	}
	G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
out:
	g_topology_unlock();
}

static void
g_raid_orphan(struct g_consumer *cp)
{
	struct g_raid_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
	    G_RAID_EVENT_DISK);
}

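/*
 * Mark the volume clean and write updated metadata, but only if there are no
 * outstanding writes and either the volume has been idle long enough or the
 * system is shutting down.
 */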
static void
g_raid_clean(struct g_raid_volume *vol, int acw)
{
	struct g_raid_softc *sc;
	int timeout;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return;
	if (!vol->v_dirty)
		return;
	if (vol->v_writes > 0)
		return;
	if (acw > 0 || (acw == -1 &&
	    vol->v_provider != NULL && vol->v_provider->acw > 0)) {
		timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
		if (!g_raid_shutdown && timeout > 0)
			return;
	}
	vol->v_dirty = 0;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
}

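/* Mark the volume dirty and write updated metadata. */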
static void
g_raid_dirty(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return;
	vol->v_dirty = 1;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
}

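/*
 * Common BIO_FLUSH implementation for transformation modules: clone the
 * request and send a copy to every subdisk that is neither absent nor failed.
 */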
void
g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
		    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_kerneldump_common_done(struct bio *bp)
{

	bp->bio_flags |= BIO_DONE;
}

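/*
 * Common kernel dump implementation: issue a synchronous BIO_WRITE to the
 * volume and poll the node until the request completes.
 */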
100589b17223SAlexander Motin int
g_raid_tr_kerneldump_common(struct g_raid_tr_object * tr,void * virtual,vm_offset_t physical,off_t offset,size_t length)100689b17223SAlexander Motin g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
100789b17223SAlexander Motin     void *virtual, vm_offset_t physical, off_t offset, size_t length)
100889b17223SAlexander Motin {
100989b17223SAlexander Motin 	struct g_raid_softc *sc;
101089b17223SAlexander Motin 	struct g_raid_volume *vol;
101189b17223SAlexander Motin 	struct bio bp;
101289b17223SAlexander Motin 
101389b17223SAlexander Motin 	vol = tr->tro_volume;
101489b17223SAlexander Motin 	sc = vol->v_softc;
101589b17223SAlexander Motin 
1016c55f5707SWarner Losh 	g_reset_bio(&bp);
101789b17223SAlexander Motin 	bp.bio_cmd = BIO_WRITE;
101889b17223SAlexander Motin 	bp.bio_done = g_raid_tr_kerneldump_common_done;
101989b17223SAlexander Motin 	bp.bio_attribute = NULL;
102089b17223SAlexander Motin 	bp.bio_offset = offset;
102189b17223SAlexander Motin 	bp.bio_length = length;
102289b17223SAlexander Motin 	bp.bio_data = virtual;
102389b17223SAlexander Motin 	bp.bio_to = vol->v_provider;
102489b17223SAlexander Motin 
102589b17223SAlexander Motin 	g_raid_start(&bp);
102689b17223SAlexander Motin 	while (!(bp.bio_flags & BIO_DONE)) {
102789b17223SAlexander Motin 		G_RAID_DEBUG1(4, sc, "Poll...");
102889b17223SAlexander Motin 		g_raid_poll(sc);
102989b17223SAlexander Motin 		DELAY(10);
103089b17223SAlexander Motin 	}
103189b17223SAlexander Motin 
103289b17223SAlexander Motin 	return (bp.bio_error != 0 ? EIO : 0);
103389b17223SAlexander Motin }
103489b17223SAlexander Motin 
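/*
 * Dumper callback installed by g_raid_kerneldump(); it forwards each
 * chunk to the volume's transformation module.
 */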
103589b17223SAlexander Motin static int
1036489ba222SMitchell Horne g_raid_dump(void *arg, void *virtual, off_t offset, size_t length)
103789b17223SAlexander Motin {
103889b17223SAlexander Motin 	struct g_raid_volume *vol;
103989b17223SAlexander Motin 	int error;
104089b17223SAlexander Motin 
104189b17223SAlexander Motin 	vol = (struct g_raid_volume *)arg;
104289b17223SAlexander Motin 	G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
104389b17223SAlexander Motin 	    (long long unsigned)offset, (long long unsigned)length);
104489b17223SAlexander Motin 
1045489ba222SMitchell Horne 	error = G_RAID_TR_KERNELDUMP(vol->v_tr, virtual, offset, length);
104689b17223SAlexander Motin 	return (error);
104789b17223SAlexander Motin }
104889b17223SAlexander Motin 
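/*
 * Answer the GEOM::kerneldump attribute: set up the dumper routine,
 * block size and media window for dumping onto this volume.
 */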
104989b17223SAlexander Motin static void
105089b17223SAlexander Motin g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
105189b17223SAlexander Motin {
105289b17223SAlexander Motin 	struct g_kerneldump *gkd;
105389b17223SAlexander Motin 	struct g_provider *pp;
105489b17223SAlexander Motin 	struct g_raid_volume *vol;
105589b17223SAlexander Motin 
105689b17223SAlexander Motin 	gkd = (struct g_kerneldump*)bp->bio_data;
105789b17223SAlexander Motin 	pp = bp->bio_to;
105889b17223SAlexander Motin 	vol = pp->private;
105989b17223SAlexander Motin 	g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
106089b17223SAlexander Motin 		pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
106189b17223SAlexander Motin 	gkd->di.dumper = g_raid_dump;
106289b17223SAlexander Motin 	gkd->di.priv = vol;
106389b17223SAlexander Motin 	gkd->di.blocksize = vol->v_sectorsize;
106489b17223SAlexander Motin 	gkd->di.maxiosize = DFLTPHYS;
106589b17223SAlexander Motin 	gkd->di.mediaoffset = gkd->offset;
106689b17223SAlexander Motin 	if ((gkd->offset + gkd->length) > vol->v_mediasize)
106789b17223SAlexander Motin 		gkd->length = vol->v_mediasize - gkd->offset;
106889b17223SAlexander Motin 	gkd->di.mediasize = gkd->length;
106989b17223SAlexander Motin 	g_io_deliver(bp, 0);
107089b17223SAlexander Motin }
107189b17223SAlexander Motin 
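/*
 * Answer the GEOM::candelete attribute: report 1 if at least one
 * present subdisk supports BIO_DELETE.
 */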
107289b17223SAlexander Motin static void
1073609a7474SAlexander Motin g_raid_candelete(struct g_raid_softc *sc, struct bio *bp)
1074609a7474SAlexander Motin {
1075609a7474SAlexander Motin 	struct g_provider *pp;
1076609a7474SAlexander Motin 	struct g_raid_volume *vol;
1077609a7474SAlexander Motin 	struct g_raid_subdisk *sd;
1078438622afSMark Johnston 	int i, val;
1079609a7474SAlexander Motin 
1080609a7474SAlexander Motin 	pp = bp->bio_to;
1081609a7474SAlexander Motin 	vol = pp->private;
1082609a7474SAlexander Motin 	for (i = 0; i < vol->v_disks_count; i++) {
1083609a7474SAlexander Motin 		sd = &vol->v_subdisks[i];
1084609a7474SAlexander Motin 		if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
1085609a7474SAlexander Motin 			continue;
1086438622afSMark Johnston 		if (sd->sd_disk->d_candelete)
1087609a7474SAlexander Motin 			break;
1088609a7474SAlexander Motin 	}
1089438622afSMark Johnston 	val = i < vol->v_disks_count;
1090438622afSMark Johnston 	g_handleattr(bp, "GEOM::candelete", &val, sizeof(val));
1091609a7474SAlexander Motin }
1092609a7474SAlexander Motin 
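/*
 * GEOM start method: queue regular I/O for the worker thread and
 * handle GETATTR requests in place.
 */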
1093609a7474SAlexander Motin static void
109489b17223SAlexander Motin g_raid_start(struct bio *bp)
109589b17223SAlexander Motin {
109689b17223SAlexander Motin 	struct g_raid_softc *sc;
109789b17223SAlexander Motin 
109889b17223SAlexander Motin 	sc = bp->bio_to->geom->softc;
109989b17223SAlexander Motin 	/*
110089b17223SAlexander Motin 	 * If sc == NULL or there are no valid disks, the provider's error
110189b17223SAlexander Motin 	 * should be set and g_raid_start() should not be called at all.
110289b17223SAlexander Motin 	 */
110389b17223SAlexander Motin //	KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
110489b17223SAlexander Motin //	    ("Provider's error should be set (error=%d)(mirror=%s).",
110589b17223SAlexander Motin //	    bp->bio_to->error, bp->bio_to->name));
110689b17223SAlexander Motin 	G_RAID_LOGREQ(3, bp, "Request received.");
110789b17223SAlexander Motin 
110889b17223SAlexander Motin 	switch (bp->bio_cmd) {
110989b17223SAlexander Motin 	case BIO_READ:
111089b17223SAlexander Motin 	case BIO_WRITE:
111189b17223SAlexander Motin 	case BIO_DELETE:
111289b17223SAlexander Motin 	case BIO_FLUSH:
11138b522bdaSWarner Losh 	case BIO_SPEEDUP:
111489b17223SAlexander Motin 		break;
111589b17223SAlexander Motin 	case BIO_GETATTR:
1116609a7474SAlexander Motin 		if (!strcmp(bp->bio_attribute, "GEOM::candelete"))
1117609a7474SAlexander Motin 			g_raid_candelete(sc, bp);
1118609a7474SAlexander Motin 		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
111989b17223SAlexander Motin 			g_raid_kerneldump(sc, bp);
112089b17223SAlexander Motin 		else
112189b17223SAlexander Motin 			g_io_deliver(bp, EOPNOTSUPP);
112289b17223SAlexander Motin 		return;
112389b17223SAlexander Motin 	default:
112489b17223SAlexander Motin 		g_io_deliver(bp, EOPNOTSUPP);
112589b17223SAlexander Motin 		return;
112689b17223SAlexander Motin 	}
112789b17223SAlexander Motin 	mtx_lock(&sc->sc_queue_mtx);
11283ab0187aSAlexander Motin 	bioq_insert_tail(&sc->sc_queue, bp);
112989b17223SAlexander Motin 	mtx_unlock(&sc->sc_queue_mtx);
113089b17223SAlexander Motin 	if (!dumping) {
113189b17223SAlexander Motin 		G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
113289b17223SAlexander Motin 		wakeup(sc);
113389b17223SAlexander Motin 	}
113489b17223SAlexander Motin }
113589b17223SAlexander Motin 
113689b17223SAlexander Motin static int
113789b17223SAlexander Motin g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
113889b17223SAlexander Motin {
113989b17223SAlexander Motin 	/*
114089b17223SAlexander Motin 	 * 5 cases:
114189b17223SAlexander Motin 	 * (1) bp entirely below NO
114289b17223SAlexander Motin 	 * (2) bp entirely above NO
114389b17223SAlexander Motin 	 * (3) bp start below, but end in range YES
114489b17223SAlexander Motin 	 * (4) bp entirely within YES
114589b17223SAlexander Motin 	 * (5) bp starts within, ends above YES
114689b17223SAlexander Motin 	 *
114789b17223SAlexander Motin 	 * lock range 10-19 (offset 10 length 10)
114889b17223SAlexander Motin 	 * (1) 1-5: first if kicks it out
114989b17223SAlexander Motin 	 * (2) 30-35: second if kicks it out
115089b17223SAlexander Motin 	 * (3) 5-15: passes both ifs
115189b17223SAlexander Motin 	 * (4) 12-14: passes both ifs
115289b17223SAlexander Motin 	 * (5) 19-20: passes both
115389b17223SAlexander Motin 	 */
115489b17223SAlexander Motin 	off_t lend = lstart + len - 1;
115589b17223SAlexander Motin 	off_t bstart = bp->bio_offset;
115689b17223SAlexander Motin 	off_t bend = bp->bio_offset + bp->bio_length - 1;
115789b17223SAlexander Motin 
115889b17223SAlexander Motin 	if (bend < lstart)
115989b17223SAlexander Motin 		return (0);
116089b17223SAlexander Motin 	if (lend < bstart)
116189b17223SAlexander Motin 		return (0);
116289b17223SAlexander Motin 	return (1);
116389b17223SAlexander Motin }
116489b17223SAlexander Motin 
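/*
 * Check whether the request overlaps any currently locked range of
 * the volume.
 */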
116589b17223SAlexander Motin static int
116689b17223SAlexander Motin g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
116789b17223SAlexander Motin {
116889b17223SAlexander Motin 	struct g_raid_lock *lp;
116989b17223SAlexander Motin 
117089b17223SAlexander Motin 	sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);
117189b17223SAlexander Motin 
117289b17223SAlexander Motin 	LIST_FOREACH(lp, &vol->v_locks, l_next) {
117389b17223SAlexander Motin 		if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
117489b17223SAlexander Motin 			return (1);
117589b17223SAlexander Motin 	}
117689b17223SAlexander Motin 	return (0);
117789b17223SAlexander Motin }
117889b17223SAlexander Motin 
117989b17223SAlexander Motin static void
118089b17223SAlexander Motin g_raid_start_request(struct bio *bp)
118189b17223SAlexander Motin {
11822d5d2424SScott Long 	struct g_raid_softc *sc __diagused;
118389b17223SAlexander Motin 	struct g_raid_volume *vol;
118489b17223SAlexander Motin 
118589b17223SAlexander Motin 	sc = bp->bio_to->geom->softc;
118689b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_LOCKED);
118789b17223SAlexander Motin 	vol = bp->bio_to->private;
118889b17223SAlexander Motin 
118989b17223SAlexander Motin 	/*
119089b17223SAlexander Motin 	 * Check to see if this item is in a locked range.  If so,
119189b17223SAlexander Motin 	 * queue it to our locked queue and return.  We'll requeue
119289b17223SAlexander Motin 	 * it when the range is unlocked.  Internal I/O for the
119389b17223SAlexander Motin 	 * rebuild/rescan/recovery process is excluded from this
119489b17223SAlexander Motin 	 * check so we can actually do the recovery.
119589b17223SAlexander Motin 	 */
119689b17223SAlexander Motin 	if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
119789b17223SAlexander Motin 	    g_raid_is_in_locked_range(vol, bp)) {
119889b17223SAlexander Motin 		G_RAID_LOGREQ(3, bp, "Defer request.");
119989b17223SAlexander Motin 		bioq_insert_tail(&vol->v_locked, bp);
120089b17223SAlexander Motin 		return;
120189b17223SAlexander Motin 	}
120289b17223SAlexander Motin 
120389b17223SAlexander Motin 	/*
120489b17223SAlexander Motin 	 * If we're actually going to do the write/delete, then
120589b17223SAlexander Motin 	 * update the idle stats for the volume.
120689b17223SAlexander Motin 	 */
120789b17223SAlexander Motin 	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
120889b17223SAlexander Motin 		if (!vol->v_dirty)
120989b17223SAlexander Motin 			g_raid_dirty(vol);
121089b17223SAlexander Motin 		vol->v_writes++;
121189b17223SAlexander Motin 	}
121289b17223SAlexander Motin 
121389b17223SAlexander Motin 	/*
121489b17223SAlexander Motin 	 * Put request onto inflight queue, so we can check if new
121589b17223SAlexander Motin 	 * Put the request onto the inflight queue, so we can check that new
121689b17223SAlexander Motin 	 * the transformation layer to start the I/O.
121789b17223SAlexander Motin 	 */
121889b17223SAlexander Motin 	bioq_insert_tail(&vol->v_inflight, bp);
121989b17223SAlexander Motin 	G_RAID_LOGREQ(4, bp, "Request started");
122089b17223SAlexander Motin 	G_RAID_TR_IOSTART(vol->v_tr, bp);
122189b17223SAlexander Motin }
122289b17223SAlexander Motin 
122389b17223SAlexander Motin static void
122489b17223SAlexander Motin g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
122589b17223SAlexander Motin {
122689b17223SAlexander Motin 	off_t off, len;
122789b17223SAlexander Motin 	struct bio *nbp;
122889b17223SAlexander Motin 	struct g_raid_lock *lp;
122989b17223SAlexander Motin 
123089b17223SAlexander Motin 	vol->v_pending_lock = 0;
123189b17223SAlexander Motin 	LIST_FOREACH(lp, &vol->v_locks, l_next) {
123289b17223SAlexander Motin 		if (lp->l_pending) {
123389b17223SAlexander Motin 			off = lp->l_offset;
123489b17223SAlexander Motin 			len = lp->l_length;
123589b17223SAlexander Motin 			lp->l_pending = 0;
123689b17223SAlexander Motin 			TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
123789b17223SAlexander Motin 				if (g_raid_bio_overlaps(nbp, off, len))
123889b17223SAlexander Motin 					lp->l_pending++;
123989b17223SAlexander Motin 			}
124089b17223SAlexander Motin 			if (lp->l_pending) {
124189b17223SAlexander Motin 				vol->v_pending_lock = 1;
124289b17223SAlexander Motin 				G_RAID_DEBUG1(4, vol->v_softc,
124389b17223SAlexander Motin 				    "Deferred lock(%jd, %jd) has %d pending",
124489b17223SAlexander Motin 				    (intmax_t)off, (intmax_t)(off + len),
124589b17223SAlexander Motin 				    lp->l_pending);
124689b17223SAlexander Motin 				continue;
124789b17223SAlexander Motin 			}
124889b17223SAlexander Motin 			G_RAID_DEBUG1(4, vol->v_softc,
124989b17223SAlexander Motin 			    "Deferred lock of %jd to %jd completed",
125089b17223SAlexander Motin 			    (intmax_t)off, (intmax_t)(off + len));
125189b17223SAlexander Motin 			G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
125289b17223SAlexander Motin 		}
125389b17223SAlexander Motin 	}
125489b17223SAlexander Motin }
125589b17223SAlexander Motin 
125689b17223SAlexander Motin void
125789b17223SAlexander Motin g_raid_iodone(struct bio *bp, int error)
125889b17223SAlexander Motin {
12592d5d2424SScott Long 	struct g_raid_softc *sc __diagused;
126089b17223SAlexander Motin 	struct g_raid_volume *vol;
126189b17223SAlexander Motin 
126289b17223SAlexander Motin 	sc = bp->bio_to->geom->softc;
126389b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_LOCKED);
126489b17223SAlexander Motin 	vol = bp->bio_to->private;
126589b17223SAlexander Motin 	G_RAID_LOGREQ(3, bp, "Request done: %d.", error);
126689b17223SAlexander Motin 
126789b17223SAlexander Motin 	/* Update stats if we did a write/delete. */
126889b17223SAlexander Motin 	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
126989b17223SAlexander Motin 		vol->v_writes--;
127089b17223SAlexander Motin 		vol->v_last_write = time_uptime;
127189b17223SAlexander Motin 	}
127289b17223SAlexander Motin 
127389b17223SAlexander Motin 	bioq_remove(&vol->v_inflight, bp);
127489b17223SAlexander Motin 	if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
127589b17223SAlexander Motin 		g_raid_finish_with_locked_ranges(vol, bp);
127689b17223SAlexander Motin 	getmicrouptime(&vol->v_last_done);
127789b17223SAlexander Motin 	g_io_deliver(bp, error);
127889b17223SAlexander Motin }
127989b17223SAlexander Motin 
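/*
 * Lock a byte range of the volume on behalf of the transformation
 * module (e.g. while rebuilding).  If in-flight requests overlap the
 * range, the lock is deferred and EBUSY is returned; G_RAID_TR_LOCKED()
 * is called once the range is actually held.
 */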
128089b17223SAlexander Motin int
128189b17223SAlexander Motin g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
128289b17223SAlexander Motin     struct bio *ignore, void *argp)
128389b17223SAlexander Motin {
128489b17223SAlexander Motin 	struct g_raid_softc *sc;
128589b17223SAlexander Motin 	struct g_raid_lock *lp;
128689b17223SAlexander Motin 	struct bio *bp;
128789b17223SAlexander Motin 
128889b17223SAlexander Motin 	sc = vol->v_softc;
128989b17223SAlexander Motin 	lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
129089b17223SAlexander Motin 	LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
129189b17223SAlexander Motin 	lp->l_offset = off;
129289b17223SAlexander Motin 	lp->l_length = len;
129389b17223SAlexander Motin 	lp->l_callback_arg = argp;
129489b17223SAlexander Motin 
129589b17223SAlexander Motin 	lp->l_pending = 0;
129689b17223SAlexander Motin 	TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
129789b17223SAlexander Motin 		if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
129889b17223SAlexander Motin 			lp->l_pending++;
129989b17223SAlexander Motin 	}
130089b17223SAlexander Motin 
130189b17223SAlexander Motin 	/*
130289b17223SAlexander Motin 	 * If there are any writes that are pending, we return EBUSY.  All
130389b17223SAlexander Motin 	 * callers will have to wait until all pending writes clear.
130489b17223SAlexander Motin 	 */
130589b17223SAlexander Motin 	if (lp->l_pending > 0) {
130689b17223SAlexander Motin 		vol->v_pending_lock = 1;
130789b17223SAlexander Motin 		G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
130889b17223SAlexander Motin 		    (intmax_t)off, (intmax_t)(off+len), lp->l_pending);
130989b17223SAlexander Motin 		return (EBUSY);
131089b17223SAlexander Motin 	}
131189b17223SAlexander Motin 	G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
131289b17223SAlexander Motin 	    (intmax_t)off, (intmax_t)(off+len));
131389b17223SAlexander Motin 	G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
131489b17223SAlexander Motin 	return (0);
131589b17223SAlexander Motin }
131689b17223SAlexander Motin 
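/*
 * Release a range locked by g_raid_lock_range() and requeue any I/O
 * that was deferred while the lock was held.
 */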
131789b17223SAlexander Motin int
131889b17223SAlexander Motin g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
131989b17223SAlexander Motin {
132089b17223SAlexander Motin 	struct g_raid_lock *lp;
132189b17223SAlexander Motin 	struct g_raid_softc *sc;
132289b17223SAlexander Motin 	struct bio *bp;
132389b17223SAlexander Motin 
132489b17223SAlexander Motin 	sc = vol->v_softc;
132589b17223SAlexander Motin 	LIST_FOREACH(lp, &vol->v_locks, l_next) {
132689b17223SAlexander Motin 		if (lp->l_offset == off && lp->l_length == len) {
132789b17223SAlexander Motin 			LIST_REMOVE(lp, l_next);
132889b17223SAlexander Motin 			/* XXX
132989b17223SAlexander Motin 			 * Right now we just put them all back on the queue
133089b17223SAlexander Motin 			 * and hope for the best.  This works because any
133189b17223SAlexander Motin 			 * locked ranges will go right back on this list
133289b17223SAlexander Motin 			 * when the worker thread runs.
133389b17223SAlexander Motin 			 * XXX
133489b17223SAlexander Motin 			 */
133589b17223SAlexander Motin 			G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
133689b17223SAlexander Motin 			    (intmax_t)lp->l_offset,
133789b17223SAlexander Motin 			    (intmax_t)(lp->l_offset+lp->l_length));
133889b17223SAlexander Motin 			mtx_lock(&sc->sc_queue_mtx);
133989b17223SAlexander Motin 			while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
13403ab0187aSAlexander Motin 				bioq_insert_tail(&sc->sc_queue, bp);
134189b17223SAlexander Motin 			mtx_unlock(&sc->sc_queue_mtx);
134289b17223SAlexander Motin 			free(lp, M_RAID);
134389b17223SAlexander Motin 			return (0);
134489b17223SAlexander Motin 		}
134589b17223SAlexander Motin 	}
134689b17223SAlexander Motin 	return (EINVAL);
134789b17223SAlexander Motin }
134889b17223SAlexander Motin 
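/*
 * Send a request to the disk backing a subdisk, updating per-disk load
 * statistics.  While dumping, writes are done synchronously through
 * the kernel dump path.
 */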
134989b17223SAlexander Motin void
135089b17223SAlexander Motin g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
135189b17223SAlexander Motin {
135289b17223SAlexander Motin 	struct g_consumer *cp;
135389b17223SAlexander Motin 	struct g_raid_disk *disk, *tdisk;
135489b17223SAlexander Motin 
135589b17223SAlexander Motin 	bp->bio_caller1 = sd;
135689b17223SAlexander Motin 
135789b17223SAlexander Motin 	/*
135889b17223SAlexander Motin 	 * Make sure that the disk is present.  Generally it is the job of
135989b17223SAlexander Motin 	 * the transformation layers not to send requests to absent disks,
136089b17223SAlexander Motin 	 * but it is better to be safe and report the situation than be sorry.
136189b17223SAlexander Motin 	 */
136289b17223SAlexander Motin 	if (sd->sd_disk == NULL) {
136389b17223SAlexander Motin 		G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");
136489b17223SAlexander Motin nodisk:
136589b17223SAlexander Motin 		bp->bio_from = NULL;
136689b17223SAlexander Motin 		bp->bio_to = NULL;
136789b17223SAlexander Motin 		bp->bio_error = ENXIO;
136889b17223SAlexander Motin 		g_raid_disk_done(bp);
136989b17223SAlexander Motin 		return;
137089b17223SAlexander Motin 	}
137189b17223SAlexander Motin 	disk = sd->sd_disk;
137289b17223SAlexander Motin 	if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
137389b17223SAlexander Motin 	    disk->d_state != G_RAID_DISK_S_FAILED) {
137489b17223SAlexander Motin 		G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
137589b17223SAlexander Motin 		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
137689b17223SAlexander Motin 		goto nodisk;
137789b17223SAlexander Motin 	}
137889b17223SAlexander Motin 
137989b17223SAlexander Motin 	cp = disk->d_consumer;
138089b17223SAlexander Motin 	bp->bio_from = cp;
138189b17223SAlexander Motin 	bp->bio_to = cp->provider;
138289b17223SAlexander Motin 	cp->index++;
138389b17223SAlexander Motin 
138489b17223SAlexander Motin 	/* Update average disk load. */
138589b17223SAlexander Motin 	TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
138689b17223SAlexander Motin 		if (tdisk->d_consumer == NULL)
138789b17223SAlexander Motin 			tdisk->d_load = 0;
138889b17223SAlexander Motin 		else
138989b17223SAlexander Motin 			tdisk->d_load = (tdisk->d_consumer->index *
139089b17223SAlexander Motin 			    G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
139189b17223SAlexander Motin 	}
139289b17223SAlexander Motin 
139389b17223SAlexander Motin 	disk->d_last_offset = bp->bio_offset + bp->bio_length;
139489b17223SAlexander Motin 	if (dumping) {
139589b17223SAlexander Motin 		G_RAID_LOGREQ(3, bp, "Sending dumping request.");
139689b17223SAlexander Motin 		if (bp->bio_cmd == BIO_WRITE) {
139789b17223SAlexander Motin 			bp->bio_error = g_raid_subdisk_kerneldump(sd,
1398489ba222SMitchell Horne 			    bp->bio_data, bp->bio_offset, bp->bio_length);
139989b17223SAlexander Motin 		} else
140089b17223SAlexander Motin 			bp->bio_error = EOPNOTSUPP;
140189b17223SAlexander Motin 		g_raid_disk_done(bp);
140289b17223SAlexander Motin 	} else {
140389b17223SAlexander Motin 		bp->bio_done = g_raid_disk_done;
140489b17223SAlexander Motin 		bp->bio_offset += sd->sd_offset;
140589b17223SAlexander Motin 		G_RAID_LOGREQ(3, bp, "Sending request.");
140689b17223SAlexander Motin 		g_io_request(bp, cp);
140789b17223SAlexander Motin 	}
140889b17223SAlexander Motin }
140989b17223SAlexander Motin 
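/*
 * Write a kernel dump chunk directly to the subdisk's backing disk
 * through its registered dumper.
 */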
141089b17223SAlexander Motin int
1411489ba222SMitchell Horne g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd, void *virtual,
1412489ba222SMitchell Horne     off_t offset, size_t length)
141389b17223SAlexander Motin {
141489b17223SAlexander Motin 
141589b17223SAlexander Motin 	if (sd->sd_disk == NULL)
141689b17223SAlexander Motin 		return (ENXIO);
141789b17223SAlexander Motin 	if (sd->sd_disk->d_kd.di.dumper == NULL)
141889b17223SAlexander Motin 		return (EOPNOTSUPP);
1419db71383bSMitchell Horne 	return (dump_write(&sd->sd_disk->d_kd.di, virtual,
1420db71383bSMitchell Horne 	    sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset, length));
142189b17223SAlexander Motin }
142289b17223SAlexander Motin 
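/*
 * Consumer completion handler: hand the finished request back to the
 * worker thread, or leave it queued for g_raid_poll() while dumping.
 */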
142389b17223SAlexander Motin static void
142489b17223SAlexander Motin g_raid_disk_done(struct bio *bp)
142589b17223SAlexander Motin {
142689b17223SAlexander Motin 	struct g_raid_softc *sc;
142789b17223SAlexander Motin 	struct g_raid_subdisk *sd;
142889b17223SAlexander Motin 
142989b17223SAlexander Motin 	sd = bp->bio_caller1;
143089b17223SAlexander Motin 	sc = sd->sd_softc;
143189b17223SAlexander Motin 	mtx_lock(&sc->sc_queue_mtx);
14323ab0187aSAlexander Motin 	bioq_insert_tail(&sc->sc_queue, bp);
143389b17223SAlexander Motin 	mtx_unlock(&sc->sc_queue_mtx);
143489b17223SAlexander Motin 	if (!dumping)
143589b17223SAlexander Motin 		wakeup(sc);
143689b17223SAlexander Motin }
143789b17223SAlexander Motin 
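/*
 * Process a completed disk request in the worker thread: update the
 * consumer bookkeeping and pass the result to the transformation
 * module.
 */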
143889b17223SAlexander Motin static void
143989b17223SAlexander Motin g_raid_disk_done_request(struct bio *bp)
144089b17223SAlexander Motin {
144189b17223SAlexander Motin 	struct g_raid_softc *sc;
144289b17223SAlexander Motin 	struct g_raid_disk *disk;
144389b17223SAlexander Motin 	struct g_raid_subdisk *sd;
144489b17223SAlexander Motin 	struct g_raid_volume *vol;
144589b17223SAlexander Motin 
144689b17223SAlexander Motin 	g_topology_assert_not();
144789b17223SAlexander Motin 
144889b17223SAlexander Motin 	G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
144989b17223SAlexander Motin 	sd = bp->bio_caller1;
145089b17223SAlexander Motin 	sc = sd->sd_softc;
145189b17223SAlexander Motin 	vol = sd->sd_volume;
145289b17223SAlexander Motin 	if (bp->bio_from != NULL) {
145389b17223SAlexander Motin 		bp->bio_from->index--;
145489b17223SAlexander Motin 		disk = bp->bio_from->private;
145589b17223SAlexander Motin 		if (disk == NULL)
145689b17223SAlexander Motin 			g_raid_kill_consumer(sc, bp->bio_from);
145789b17223SAlexander Motin 	}
145889b17223SAlexander Motin 	bp->bio_offset -= sd->sd_offset;
145989b17223SAlexander Motin 
146089b17223SAlexander Motin 	G_RAID_TR_IODONE(vol->v_tr, sd, bp);
146189b17223SAlexander Motin }
146289b17223SAlexander Motin 
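/*
 * Dispatch a queued event to the proper handler, then free it or wake
 * up the thread waiting for its completion.
 */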
146389b17223SAlexander Motin static void
146489b17223SAlexander Motin g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
146589b17223SAlexander Motin {
146689b17223SAlexander Motin 
146789b17223SAlexander Motin 	if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
146889b17223SAlexander Motin 		ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
146989b17223SAlexander Motin 	else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
147089b17223SAlexander Motin 		ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
147189b17223SAlexander Motin 	else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
147289b17223SAlexander Motin 		ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
147389b17223SAlexander Motin 	else
147489b17223SAlexander Motin 		ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
147589b17223SAlexander Motin 	if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
147689b17223SAlexander Motin 		KASSERT(ep->e_error == 0,
147789b17223SAlexander Motin 		    ("Error cannot be handled."));
147889b17223SAlexander Motin 		g_raid_event_free(ep);
147989b17223SAlexander Motin 	} else {
148089b17223SAlexander Motin 		ep->e_flags |= G_RAID_EVENT_DONE;
148189b17223SAlexander Motin 		G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
148289b17223SAlexander Motin 		mtx_lock(&sc->sc_queue_mtx);
148389b17223SAlexander Motin 		wakeup(ep);
148489b17223SAlexander Motin 		mtx_unlock(&sc->sc_queue_mtx);
148589b17223SAlexander Motin 	}
148689b17223SAlexander Motin }
148789b17223SAlexander Motin 
148889b17223SAlexander Motin /*
148989b17223SAlexander Motin  * Worker thread.
149089b17223SAlexander Motin  */
149189b17223SAlexander Motin static void
149289b17223SAlexander Motin g_raid_worker(void *arg)
149389b17223SAlexander Motin {
149489b17223SAlexander Motin 	struct g_raid_softc *sc;
149589b17223SAlexander Motin 	struct g_raid_event *ep;
149689b17223SAlexander Motin 	struct g_raid_volume *vol;
149789b17223SAlexander Motin 	struct bio *bp;
149889b17223SAlexander Motin 	struct timeval now, t;
149989b17223SAlexander Motin 	int timeout, rv;
150089b17223SAlexander Motin 
150189b17223SAlexander Motin 	sc = arg;
150289b17223SAlexander Motin 	thread_lock(curthread);
150389b17223SAlexander Motin 	sched_prio(curthread, PRIBIO);
150489b17223SAlexander Motin 	thread_unlock(curthread);
150589b17223SAlexander Motin 
150689b17223SAlexander Motin 	sx_xlock(&sc->sc_lock);
150789b17223SAlexander Motin 	for (;;) {
150889b17223SAlexander Motin 		mtx_lock(&sc->sc_queue_mtx);
150989b17223SAlexander Motin 		/*
151089b17223SAlexander Motin 		 * First take a look at events.
151189b17223SAlexander Motin 		 * It is important to handle events before any I/O requests.
151289b17223SAlexander Motin 		 */
151389b17223SAlexander Motin 		bp = NULL;
151489b17223SAlexander Motin 		vol = NULL;
151589b17223SAlexander Motin 		rv = 0;
151689b17223SAlexander Motin 		ep = TAILQ_FIRST(&sc->sc_events);
151789b17223SAlexander Motin 		if (ep != NULL)
151889b17223SAlexander Motin 			TAILQ_REMOVE(&sc->sc_events, ep, e_next);
151989b17223SAlexander Motin 		else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
152089b17223SAlexander Motin 			;
152189b17223SAlexander Motin 		else {
152289b17223SAlexander Motin 			getmicrouptime(&now);
152389b17223SAlexander Motin 			t = now;
152489b17223SAlexander Motin 			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
152589b17223SAlexander Motin 				if (bioq_first(&vol->v_inflight) == NULL &&
152689b17223SAlexander Motin 				    vol->v_tr &&
152789b17223SAlexander Motin 				    timevalcmp(&vol->v_last_done, &t, < ))
152889b17223SAlexander Motin 					t = vol->v_last_done;
152989b17223SAlexander Motin 			}
153089b17223SAlexander Motin 			timevalsub(&t, &now);
153189b17223SAlexander Motin 			timeout = g_raid_idle_threshold +
153289b17223SAlexander Motin 			    t.tv_sec * 1000000 + t.tv_usec;
153389b17223SAlexander Motin 			if (timeout > 0) {
153489b17223SAlexander Motin 				/*
153589b17223SAlexander Motin 				 * Two steps to avoid overflows at HZ=1000
153689b17223SAlexander Motin 				 * and idle timeouts > 2.1s.  Some rounding
153789b17223SAlexander Motin 				 * errors can occur, but they are < 1 tick,
153889b17223SAlexander Motin 				 * which is deemed to be close enough for
153989b17223SAlexander Motin 				 * this purpose.
154089b17223SAlexander Motin 				 */
154189b17223SAlexander Motin 				int micpertic = 1000000 / hz;
154289b17223SAlexander Motin 				timeout = (timeout + micpertic - 1) / micpertic;
154389b17223SAlexander Motin 				sx_xunlock(&sc->sc_lock);
154489b17223SAlexander Motin 				MSLEEP(rv, sc, &sc->sc_queue_mtx,
154589b17223SAlexander Motin 				    PRIBIO | PDROP, "-", timeout);
154689b17223SAlexander Motin 				sx_xlock(&sc->sc_lock);
154789b17223SAlexander Motin 				goto process;
154889b17223SAlexander Motin 			} else
154989b17223SAlexander Motin 				rv = EWOULDBLOCK;
155089b17223SAlexander Motin 		}
155189b17223SAlexander Motin 		mtx_unlock(&sc->sc_queue_mtx);
155289b17223SAlexander Motin process:
155389b17223SAlexander Motin 		if (ep != NULL) {
155489b17223SAlexander Motin 			g_raid_handle_event(sc, ep);
155589b17223SAlexander Motin 		} else if (bp != NULL) {
155689b17223SAlexander Motin 			if (bp->bio_to != NULL &&
155789b17223SAlexander Motin 			    bp->bio_to->geom == sc->sc_geom)
155889b17223SAlexander Motin 				g_raid_start_request(bp);
155989b17223SAlexander Motin 			else
156089b17223SAlexander Motin 				g_raid_disk_done_request(bp);
156189b17223SAlexander Motin 		} else if (rv == EWOULDBLOCK) {
156289b17223SAlexander Motin 			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
156389b17223SAlexander Motin 				g_raid_clean(vol, -1);
156489b17223SAlexander Motin 				if (bioq_first(&vol->v_inflight) == NULL &&
156589b17223SAlexander Motin 				    vol->v_tr) {
156689b17223SAlexander Motin 					t.tv_sec = g_raid_idle_threshold / 1000000;
156789b17223SAlexander Motin 					t.tv_usec = g_raid_idle_threshold % 1000000;
156889b17223SAlexander Motin 					timevaladd(&t, &vol->v_last_done);
156989b17223SAlexander Motin 					getmicrouptime(&now);
157089b17223SAlexander Motin 					if (timevalcmp(&t, &now, <= )) {
157189b17223SAlexander Motin 						G_RAID_TR_IDLE(vol->v_tr);
157289b17223SAlexander Motin 						vol->v_last_done = now;
157389b17223SAlexander Motin 					}
157489b17223SAlexander Motin 				}
157589b17223SAlexander Motin 			}
157689b17223SAlexander Motin 		}
157789b17223SAlexander Motin 		if (sc->sc_stopping == G_RAID_DESTROY_HARD)
157889b17223SAlexander Motin 			g_raid_destroy_node(sc, 1);	/* May not return. */
157989b17223SAlexander Motin 	}
158089b17223SAlexander Motin }
158189b17223SAlexander Motin 
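/*
 * Single pass of the worker loop, used while dumping when the worker
 * thread cannot be scheduled.
 */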
158289b17223SAlexander Motin static void
158389b17223SAlexander Motin g_raid_poll(struct g_raid_softc *sc)
158489b17223SAlexander Motin {
158589b17223SAlexander Motin 	struct g_raid_event *ep;
158689b17223SAlexander Motin 	struct bio *bp;
158789b17223SAlexander Motin 
158889b17223SAlexander Motin 	sx_xlock(&sc->sc_lock);
158989b17223SAlexander Motin 	mtx_lock(&sc->sc_queue_mtx);
159089b17223SAlexander Motin 	/*
159189b17223SAlexander Motin 	 * First take a look at events.
159289b17223SAlexander Motin 	 * It is important to handle events before any I/O requests.
159389b17223SAlexander Motin 	 */
159489b17223SAlexander Motin 	ep = TAILQ_FIRST(&sc->sc_events);
159589b17223SAlexander Motin 	if (ep != NULL) {
159689b17223SAlexander Motin 		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
159789b17223SAlexander Motin 		mtx_unlock(&sc->sc_queue_mtx);
159889b17223SAlexander Motin 		g_raid_handle_event(sc, ep);
159989b17223SAlexander Motin 		goto out;
160089b17223SAlexander Motin 	}
160189b17223SAlexander Motin 	bp = bioq_takefirst(&sc->sc_queue);
160289b17223SAlexander Motin 	if (bp != NULL) {
160389b17223SAlexander Motin 		mtx_unlock(&sc->sc_queue_mtx);
160489b17223SAlexander Motin 		if (bp->bio_from == NULL ||
160589b17223SAlexander Motin 		    bp->bio_from->geom != sc->sc_geom)
160689b17223SAlexander Motin 			g_raid_start_request(bp);
160789b17223SAlexander Motin 		else
160889b17223SAlexander Motin 			g_raid_disk_done_request(bp);
160989b17223SAlexander Motin 	}
161089b17223SAlexander Motin out:
161189b17223SAlexander Motin 	sx_xunlock(&sc->sc_lock);
161289b17223SAlexander Motin }
161389b17223SAlexander Motin 
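/*
 * Create and announce the GEOM provider for a volume, propagating
 * unmapped I/O support and stripe geometry from the underlying disks.
 */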
161489b17223SAlexander Motin static void
161589b17223SAlexander Motin g_raid_launch_provider(struct g_raid_volume *vol)
161689b17223SAlexander Motin {
161789b17223SAlexander Motin 	struct g_raid_disk *disk;
1618b43560abSAlexander Motin 	struct g_raid_subdisk *sd;
161989b17223SAlexander Motin 	struct g_raid_softc *sc;
162089b17223SAlexander Motin 	struct g_provider *pp;
162189b17223SAlexander Motin 	char name[G_RAID_MAX_VOLUMENAME];
162289b17223SAlexander Motin 	off_t off;
1623b43560abSAlexander Motin 	int i;
162489b17223SAlexander Motin 
162589b17223SAlexander Motin 	sc = vol->v_softc;
162689b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_LOCKED);
162789b17223SAlexander Motin 
162889b17223SAlexander Motin 	g_topology_lock();
162989b17223SAlexander Motin 	/* Try to name provider with volume name. */
163089b17223SAlexander Motin 	snprintf(name, sizeof(name), "raid/%s", vol->v_name);
163189b17223SAlexander Motin 	if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
163289b17223SAlexander Motin 	    g_provider_by_name(name) != NULL) {
163389b17223SAlexander Motin 		/* Otherwise use a sequential volume number. */
163489b17223SAlexander Motin 		snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
163589b17223SAlexander Motin 	}
1636bd9fba0cSSean Bruno 
163789b17223SAlexander Motin 	pp = g_new_providerf(sc->sc_geom, "%s", name);
163840ea77a0SAlexander Motin 	pp->flags |= G_PF_DIRECT_RECEIVE;
1639b43560abSAlexander Motin 	if (vol->v_tr->tro_class->trc_accept_unmapped) {
1640b43560abSAlexander Motin 		pp->flags |= G_PF_ACCEPT_UNMAPPED;
1641b43560abSAlexander Motin 		for (i = 0; i < vol->v_disks_count; i++) {
1642b43560abSAlexander Motin 			sd = &vol->v_subdisks[i];
1643b43560abSAlexander Motin 			if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
1644b43560abSAlexander Motin 				continue;
1645b43560abSAlexander Motin 			if ((sd->sd_disk->d_consumer->provider->flags &
1646b43560abSAlexander Motin 			    G_PF_ACCEPT_UNMAPPED) == 0)
1647b43560abSAlexander Motin 				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
1648b43560abSAlexander Motin 		}
1649b43560abSAlexander Motin 	}
165089b17223SAlexander Motin 	pp->private = vol;
165189b17223SAlexander Motin 	pp->mediasize = vol->v_mediasize;
165289b17223SAlexander Motin 	pp->sectorsize = vol->v_sectorsize;
165389b17223SAlexander Motin 	pp->stripesize = 0;
165489b17223SAlexander Motin 	pp->stripeoffset = 0;
165589b17223SAlexander Motin 	if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
165689b17223SAlexander Motin 	    vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
165789b17223SAlexander Motin 	    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
165889b17223SAlexander Motin 	    vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
165989b17223SAlexander Motin 		if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
166089b17223SAlexander Motin 		    disk->d_consumer != NULL &&
166189b17223SAlexander Motin 		    disk->d_consumer->provider != NULL) {
166289b17223SAlexander Motin 			pp->stripesize = disk->d_consumer->provider->stripesize;
166389b17223SAlexander Motin 			off = disk->d_consumer->provider->stripeoffset;
166489b17223SAlexander Motin 			pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
166589b17223SAlexander Motin 			if (off > 0)
166689b17223SAlexander Motin 				pp->stripeoffset %= off;
166789b17223SAlexander Motin 		}
166889b17223SAlexander Motin 		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
166989b17223SAlexander Motin 			pp->stripesize *= (vol->v_disks_count - 1);
167089b17223SAlexander Motin 			pp->stripeoffset *= (vol->v_disks_count - 1);
167189b17223SAlexander Motin 		}
167289b17223SAlexander Motin 	} else
167389b17223SAlexander Motin 		pp->stripesize = vol->v_strip_size;
167489b17223SAlexander Motin 	vol->v_provider = pp;
167589b17223SAlexander Motin 	g_error_provider(pp, 0);
167689b17223SAlexander Motin 	g_topology_unlock();
167789b17223SAlexander Motin 	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
167889b17223SAlexander Motin 	    pp->name, vol->v_name);
167989b17223SAlexander Motin }
168089b17223SAlexander Motin 
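/*
 * Withdraw the volume's provider and fail any I/O still queued for it.
 */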
168189b17223SAlexander Motin static void
168289b17223SAlexander Motin g_raid_destroy_provider(struct g_raid_volume *vol)
168389b17223SAlexander Motin {
168489b17223SAlexander Motin 	struct g_raid_softc *sc;
168589b17223SAlexander Motin 	struct g_provider *pp;
168689b17223SAlexander Motin 	struct bio *bp, *tmp;
168789b17223SAlexander Motin 
168889b17223SAlexander Motin 	g_topology_assert_not();
168989b17223SAlexander Motin 	sc = vol->v_softc;
169089b17223SAlexander Motin 	pp = vol->v_provider;
169189b17223SAlexander Motin 	KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));
169289b17223SAlexander Motin 
169389b17223SAlexander Motin 	g_topology_lock();
169489b17223SAlexander Motin 	g_error_provider(pp, ENXIO);
169589b17223SAlexander Motin 	mtx_lock(&sc->sc_queue_mtx);
169689b17223SAlexander Motin 	TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
169789b17223SAlexander Motin 		if (bp->bio_to != pp)
169889b17223SAlexander Motin 			continue;
169989b17223SAlexander Motin 		bioq_remove(&sc->sc_queue, bp);
170089b17223SAlexander Motin 		g_io_deliver(bp, ENXIO);
170189b17223SAlexander Motin 	}
170289b17223SAlexander Motin 	mtx_unlock(&sc->sc_queue_mtx);
170389b17223SAlexander Motin 	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
170489b17223SAlexander Motin 	    pp->name, vol->v_name);
170589b17223SAlexander Motin 	g_wither_provider(pp, ENXIO);
170689b17223SAlexander Motin 	g_topology_unlock();
170789b17223SAlexander Motin 	vol->v_provider = NULL;
170889b17223SAlexander Motin }
170989b17223SAlexander Motin 
171089b17223SAlexander Motin /*
171189b17223SAlexander Motin  * Update device state.
171289b17223SAlexander Motin  */
171389b17223SAlexander Motin static int
171489b17223SAlexander Motin g_raid_update_volume(struct g_raid_volume *vol, u_int event)
171589b17223SAlexander Motin {
171689b17223SAlexander Motin 	struct g_raid_softc *sc;
171789b17223SAlexander Motin 
171889b17223SAlexander Motin 	sc = vol->v_softc;
171989b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_XLOCKED);
172089b17223SAlexander Motin 
172189b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
172289b17223SAlexander Motin 	    g_raid_volume_event2str(event),
172389b17223SAlexander Motin 	    vol->v_name);
172489b17223SAlexander Motin 	switch (event) {
172589b17223SAlexander Motin 	case G_RAID_VOLUME_E_DOWN:
172689b17223SAlexander Motin 		if (vol->v_provider != NULL)
172789b17223SAlexander Motin 			g_raid_destroy_provider(vol);
172889b17223SAlexander Motin 		break;
172989b17223SAlexander Motin 	case G_RAID_VOLUME_E_UP:
173089b17223SAlexander Motin 		if (vol->v_provider == NULL)
173189b17223SAlexander Motin 			g_raid_launch_provider(vol);
173289b17223SAlexander Motin 		break;
173389b17223SAlexander Motin 	case G_RAID_VOLUME_E_START:
173489b17223SAlexander Motin 		if (vol->v_tr)
173589b17223SAlexander Motin 			G_RAID_TR_START(vol->v_tr);
173689b17223SAlexander Motin 		return (0);
173789b17223SAlexander Motin 	default:
173889b17223SAlexander Motin 		if (sc->sc_md)
173989b17223SAlexander Motin 			G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
174089b17223SAlexander Motin 		return (0);
174189b17223SAlexander Motin 	}
174289b17223SAlexander Motin 
174389b17223SAlexander Motin 	/* Manage root mount release. */
174489b17223SAlexander Motin 	if (vol->v_starting) {
174589b17223SAlexander Motin 		vol->v_starting = 0;
174689b17223SAlexander Motin 		G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
174789b17223SAlexander Motin 		root_mount_rel(vol->v_rootmount);
174889b17223SAlexander Motin 		vol->v_rootmount = NULL;
174989b17223SAlexander Motin 	}
175089b17223SAlexander Motin 	if (vol->v_stopping && vol->v_provider_open == 0)
175189b17223SAlexander Motin 		g_raid_destroy_volume(vol);
175289b17223SAlexander Motin 	return (0);
175389b17223SAlexander Motin }
175489b17223SAlexander Motin 
175589b17223SAlexander Motin /*
175689b17223SAlexander Motin  * Update subdisk state.
175789b17223SAlexander Motin  */
175889b17223SAlexander Motin static int
175989b17223SAlexander Motin g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
176089b17223SAlexander Motin {
176189b17223SAlexander Motin 	struct g_raid_softc *sc;
176289b17223SAlexander Motin 	struct g_raid_volume *vol;
176389b17223SAlexander Motin 
176489b17223SAlexander Motin 	sc = sd->sd_softc;
176589b17223SAlexander Motin 	vol = sd->sd_volume;
176689b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_XLOCKED);
176789b17223SAlexander Motin 
176889b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
176989b17223SAlexander Motin 	    g_raid_subdisk_event2str(event),
177089b17223SAlexander Motin 	    vol->v_name, sd->sd_pos,
177189b17223SAlexander Motin 	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
177289b17223SAlexander Motin 	if (vol->v_tr)
177389b17223SAlexander Motin 		G_RAID_TR_EVENT(vol->v_tr, sd, event);
177489b17223SAlexander Motin 
177589b17223SAlexander Motin 	return (0);
177689b17223SAlexander Motin }
177789b17223SAlexander Motin 
177889b17223SAlexander Motin /*
177989b17223SAlexander Motin  * Update disk state.
178089b17223SAlexander Motin  */
178189b17223SAlexander Motin static int
178289b17223SAlexander Motin g_raid_update_disk(struct g_raid_disk *disk, u_int event)
178389b17223SAlexander Motin {
178489b17223SAlexander Motin 	struct g_raid_softc *sc;
178589b17223SAlexander Motin 
178689b17223SAlexander Motin 	sc = disk->d_softc;
178789b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_XLOCKED);
178889b17223SAlexander Motin 
178989b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
179089b17223SAlexander Motin 	    g_raid_disk_event2str(event),
179189b17223SAlexander Motin 	    g_raid_get_diskname(disk));
179289b17223SAlexander Motin 
179389b17223SAlexander Motin 	if (sc->sc_md)
179489b17223SAlexander Motin 		G_RAID_MD_EVENT(sc->sc_md, disk, event);
179589b17223SAlexander Motin 	return (0);
179689b17223SAlexander Motin }
179789b17223SAlexander Motin 
179889b17223SAlexander Motin /*
179989b17223SAlexander Motin  * Node event.
180089b17223SAlexander Motin  */
180189b17223SAlexander Motin static int
180289b17223SAlexander Motin g_raid_update_node(struct g_raid_softc *sc, u_int event)
180389b17223SAlexander Motin {
180489b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_XLOCKED);
180589b17223SAlexander Motin 
180689b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Event %s for the array.",
180789b17223SAlexander Motin 	    g_raid_node_event2str(event));
180889b17223SAlexander Motin 
180989b17223SAlexander Motin 	if (event == G_RAID_NODE_E_WAKE)
181089b17223SAlexander Motin 		return (0);
181189b17223SAlexander Motin 	if (sc->sc_md)
181289b17223SAlexander Motin 		G_RAID_MD_EVENT(sc->sc_md, NULL, event);
181389b17223SAlexander Motin 	return (0);
181489b17223SAlexander Motin }
181589b17223SAlexander Motin 
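/*
 * GEOM access method: track the volume open count, refuse new opens
 * while stopping and write opens of read-only volumes, and trigger
 * delayed destruction once the last consumer closes.
 */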
181689b17223SAlexander Motin static int
181789b17223SAlexander Motin g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
181889b17223SAlexander Motin {
181989b17223SAlexander Motin 	struct g_raid_volume *vol;
182089b17223SAlexander Motin 	struct g_raid_softc *sc;
182114e2cd0aSAlexander Motin 	int dcw, opens, error = 0;
182289b17223SAlexander Motin 
182389b17223SAlexander Motin 	g_topology_assert();
182489b17223SAlexander Motin 	sc = pp->geom->softc;
182589b17223SAlexander Motin 	vol = pp->private;
182689b17223SAlexander Motin 	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
182789b17223SAlexander Motin 	KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));
182889b17223SAlexander Motin 
182989b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
183089b17223SAlexander Motin 	    acr, acw, ace);
183189b17223SAlexander Motin 	dcw = pp->acw + acw;
183289b17223SAlexander Motin 
183389b17223SAlexander Motin 	g_topology_unlock();
183489b17223SAlexander Motin 	sx_xlock(&sc->sc_lock);
183589b17223SAlexander Motin 	/* Deny new opens while dying. */
183689b17223SAlexander Motin 	if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
183789b17223SAlexander Motin 		error = ENXIO;
183889b17223SAlexander Motin 		goto out;
183989b17223SAlexander Motin 	}
18400f0b2fd8SAlexander Motin 	/* Deny write opens for read-only volumes. */
18410f0b2fd8SAlexander Motin 	if (vol->v_read_only && acw > 0) {
18420f0b2fd8SAlexander Motin 		error = EROFS;
18430f0b2fd8SAlexander Motin 		goto out;
18440f0b2fd8SAlexander Motin 	}
1845a479c51bSAlexander Motin 	if (dcw == 0)
184689b17223SAlexander Motin 		g_raid_clean(vol, dcw);
184789b17223SAlexander Motin 	vol->v_provider_open += acr + acw + ace;
184889b17223SAlexander Motin 	/* Handle delayed node destruction. */
184989b17223SAlexander Motin 	if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
185089b17223SAlexander Motin 	    vol->v_provider_open == 0) {
185189b17223SAlexander Motin 		/* Count open volumes. */
185289b17223SAlexander Motin 		opens = g_raid_nopens(sc);
185389b17223SAlexander Motin 		if (opens == 0) {
185489b17223SAlexander Motin 			sc->sc_stopping = G_RAID_DESTROY_HARD;
185589b17223SAlexander Motin 			/* Wake up the worker to make it self-destruct. */
185689b17223SAlexander Motin 			g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
185789b17223SAlexander Motin 		}
185889b17223SAlexander Motin 	}
185989b17223SAlexander Motin 	/* Handle open volume destruction. */
186089b17223SAlexander Motin 	if (vol->v_stopping && vol->v_provider_open == 0)
186189b17223SAlexander Motin 		g_raid_destroy_volume(vol);
186289b17223SAlexander Motin out:
186389b17223SAlexander Motin 	sx_xunlock(&sc->sc_lock);
186489b17223SAlexander Motin 	g_topology_lock();
186589b17223SAlexander Motin 	return (error);
186689b17223SAlexander Motin }
186789b17223SAlexander Motin 
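/*
 * Create a new array node: allocate the softc and geom, initialize the
 * queues and locks, and start the per-node worker thread.
 */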
186889b17223SAlexander Motin struct g_raid_softc *
186989b17223SAlexander Motin g_raid_create_node(struct g_class *mp,
187089b17223SAlexander Motin     const char *name, struct g_raid_md_object *md)
187189b17223SAlexander Motin {
187289b17223SAlexander Motin 	struct g_raid_softc *sc;
187389b17223SAlexander Motin 	struct g_geom *gp;
187489b17223SAlexander Motin 	int error;
187589b17223SAlexander Motin 
187689b17223SAlexander Motin 	g_topology_assert();
187789b17223SAlexander Motin 	G_RAID_DEBUG(1, "Creating array %s.", name);
187889b17223SAlexander Motin 
187989b17223SAlexander Motin 	gp = g_new_geomf(mp, "%s", name);
188089b17223SAlexander Motin 	sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
188189b17223SAlexander Motin 	gp->start = g_raid_start;
188289b17223SAlexander Motin 	gp->orphan = g_raid_orphan;
188389b17223SAlexander Motin 	gp->access = g_raid_access;
188489b17223SAlexander Motin 	gp->dumpconf = g_raid_dumpconf;
188589b17223SAlexander Motin 
188689b17223SAlexander Motin 	sc->sc_md = md;
188789b17223SAlexander Motin 	sc->sc_geom = gp;
188889b17223SAlexander Motin 	sc->sc_flags = 0;
188989b17223SAlexander Motin 	TAILQ_INIT(&sc->sc_volumes);
189089b17223SAlexander Motin 	TAILQ_INIT(&sc->sc_disks);
1891c9f545e5SAlexander Motin 	sx_init(&sc->sc_lock, "graid:lock");
1892c9f545e5SAlexander Motin 	mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
189389b17223SAlexander Motin 	TAILQ_INIT(&sc->sc_events);
189489b17223SAlexander Motin 	bioq_init(&sc->sc_queue);
189589b17223SAlexander Motin 	gp->softc = sc;
189689b17223SAlexander Motin 	error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
189789b17223SAlexander Motin 	    "g_raid %s", name);
189889b17223SAlexander Motin 	if (error != 0) {
189989b17223SAlexander Motin 		G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
190089b17223SAlexander Motin 		mtx_destroy(&sc->sc_queue_mtx);
190189b17223SAlexander Motin 		sx_destroy(&sc->sc_lock);
190289b17223SAlexander Motin 		g_destroy_geom(sc->sc_geom);
190389b17223SAlexander Motin 		free(sc, M_RAID);
190489b17223SAlexander Motin 		return (NULL);
190589b17223SAlexander Motin 	}
190689b17223SAlexander Motin 
190789b17223SAlexander Motin 	G_RAID_DEBUG1(0, sc, "Array %s created.", name);
190889b17223SAlexander Motin 	return (sc);
190989b17223SAlexander Motin }
191089b17223SAlexander Motin 
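/*
 * Create a new volume within the node, assign it a free global ID and
 * hold root mounting until the volume comes up.
 */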
191189b17223SAlexander Motin struct g_raid_volume *
191289b17223SAlexander Motin g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
191389b17223SAlexander Motin {
191489b17223SAlexander Motin 	struct g_raid_volume	*vol, *vol1;
191589b17223SAlexander Motin 	int i;
191689b17223SAlexander Motin 
191789b17223SAlexander Motin 	G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
191889b17223SAlexander Motin 	vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
191989b17223SAlexander Motin 	vol->v_softc = sc;
192089b17223SAlexander Motin 	strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
192189b17223SAlexander Motin 	vol->v_state = G_RAID_VOLUME_S_STARTING;
192289b17223SAlexander Motin 	vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
192389b17223SAlexander Motin 	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
19248f12ca2eSAlexander Motin 	vol->v_rotate_parity = 1;
192589b17223SAlexander Motin 	bioq_init(&vol->v_inflight);
192689b17223SAlexander Motin 	bioq_init(&vol->v_locked);
192789b17223SAlexander Motin 	LIST_INIT(&vol->v_locks);
192889b17223SAlexander Motin 	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
192989b17223SAlexander Motin 		vol->v_subdisks[i].sd_softc = sc;
193089b17223SAlexander Motin 		vol->v_subdisks[i].sd_volume = vol;
193189b17223SAlexander Motin 		vol->v_subdisks[i].sd_pos = i;
193289b17223SAlexander Motin 		vol->v_subdisks[i].sd_state = G_RAID_DISK_S_NONE;
193389b17223SAlexander Motin 	}
193489b17223SAlexander Motin 
193589b17223SAlexander Motin 	/* Find a free ID for this volume. */
193689b17223SAlexander Motin 	g_topology_lock();
193789b17223SAlexander Motin 	vol1 = vol;
193889b17223SAlexander Motin 	if (id >= 0) {
193989b17223SAlexander Motin 		LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
194089b17223SAlexander Motin 			if (vol1->v_global_id == id)
194189b17223SAlexander Motin 				break;
194289b17223SAlexander Motin 		}
194389b17223SAlexander Motin 	}
194489b17223SAlexander Motin 	if (vol1 != NULL) {
194589b17223SAlexander Motin 		for (id = 0; ; id++) {
194689b17223SAlexander Motin 			LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
194789b17223SAlexander Motin 				if (vol1->v_global_id == id)
194889b17223SAlexander Motin 					break;
194989b17223SAlexander Motin 			}
195089b17223SAlexander Motin 			if (vol1 == NULL)
195189b17223SAlexander Motin 				break;
195289b17223SAlexander Motin 		}
195389b17223SAlexander Motin 	}
195489b17223SAlexander Motin 	vol->v_global_id = id;
195589b17223SAlexander Motin 	LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
195689b17223SAlexander Motin 	g_topology_unlock();
195789b17223SAlexander Motin 
195889b17223SAlexander Motin 	/* Delay root mounting. */
195989b17223SAlexander Motin 	vol->v_rootmount = root_mount_hold("GRAID");
196089b17223SAlexander Motin 	G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
196189b17223SAlexander Motin 	vol->v_starting = 1;
196289b17223SAlexander Motin 	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
196389b17223SAlexander Motin 	return (vol);
196489b17223SAlexander Motin }
196589b17223SAlexander Motin 
196689b17223SAlexander Motin struct g_raid_disk *
196789b17223SAlexander Motin g_raid_create_disk(struct g_raid_softc *sc)
196889b17223SAlexander Motin {
196989b17223SAlexander Motin 	struct g_raid_disk	*disk;
197089b17223SAlexander Motin 
197189b17223SAlexander Motin 	G_RAID_DEBUG1(1, sc, "Creating disk.");
197289b17223SAlexander Motin 	disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
197389b17223SAlexander Motin 	disk->d_softc = sc;
197489b17223SAlexander Motin 	disk->d_state = G_RAID_DISK_S_NONE;
197589b17223SAlexander Motin 	TAILQ_INIT(&disk->d_subdisks);
197689b17223SAlexander Motin 	TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
197789b17223SAlexander Motin 	return (disk);
197889b17223SAlexander Motin }
197989b17223SAlexander Motin 
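/*
 * Pick a transformation module for the volume by tasting each enabled
 * class until one accepts it.
 */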
198089b17223SAlexander Motin int g_raid_start_volume(struct g_raid_volume *vol)
198189b17223SAlexander Motin {
198289b17223SAlexander Motin 	struct g_raid_tr_class *class;
198389b17223SAlexander Motin 	struct g_raid_tr_object *obj;
198489b17223SAlexander Motin 	int status;
198589b17223SAlexander Motin 
198689b17223SAlexander Motin 	G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
198789b17223SAlexander Motin 	LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
1988c89d2fbeSAlexander Motin 		if (!class->trc_enable)
1989c89d2fbeSAlexander Motin 			continue;
199089b17223SAlexander Motin 		G_RAID_DEBUG1(2, vol->v_softc,
199189b17223SAlexander Motin 		    "Tasting volume %s for %s transformation.",
199289b17223SAlexander Motin 		    vol->v_name, class->name);
199389b17223SAlexander Motin 		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
199489b17223SAlexander Motin 		    M_WAITOK);
199589b17223SAlexander Motin 		obj->tro_class = class;
199689b17223SAlexander Motin 		obj->tro_volume = vol;
199789b17223SAlexander Motin 		status = G_RAID_TR_TASTE(obj, vol);
199889b17223SAlexander Motin 		if (status != G_RAID_TR_TASTE_FAIL)
199989b17223SAlexander Motin 			break;
200089b17223SAlexander Motin 		kobj_delete((kobj_t)obj, M_RAID);
200189b17223SAlexander Motin 	}
200289b17223SAlexander Motin 	if (class == NULL) {
200389b17223SAlexander Motin 		G_RAID_DEBUG1(0, vol->v_softc,
200489b17223SAlexander Motin 		    "No transformation module found for %s.",
200589b17223SAlexander Motin 		    vol->v_name);
200689b17223SAlexander Motin 		vol->v_tr = NULL;
200789b17223SAlexander Motin 		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
200889b17223SAlexander Motin 		g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
200989b17223SAlexander Motin 		    G_RAID_EVENT_VOLUME);
201089b17223SAlexander Motin 		return (-1);
201189b17223SAlexander Motin 	}
201289b17223SAlexander Motin 	G_RAID_DEBUG1(2, vol->v_softc,
201389b17223SAlexander Motin 	    "Transformation module %s chosen for %s.",
201489b17223SAlexander Motin 	    class->name, vol->v_name);
201589b17223SAlexander Motin 	vol->v_tr = obj;
201689b17223SAlexander Motin 	return (0);
201789b17223SAlexander Motin }
201889b17223SAlexander Motin 
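/*
 * Tear down the whole node: destroy every volume and disk, free the
 * metadata object and wither the geom.  When called from the worker
 * thread itself (worker != 0) the node state is freed and the thread
 * exits; otherwise the worker is woken up so it can self-destruct.
 * Returns EBUSY if some volume or disk could not be destroyed yet.
 */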
201989b17223SAlexander Motin int
202089b17223SAlexander Motin g_raid_destroy_node(struct g_raid_softc *sc, int worker)
202189b17223SAlexander Motin {
202289b17223SAlexander Motin 	struct g_raid_volume *vol, *tmpv;
202389b17223SAlexander Motin 	struct g_raid_disk *disk, *tmpd;
202489b17223SAlexander Motin 	int error = 0;
202589b17223SAlexander Motin 
202689b17223SAlexander Motin 	sc->sc_stopping = G_RAID_DESTROY_HARD;
202789b17223SAlexander Motin 	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
202889b17223SAlexander Motin 		if (g_raid_destroy_volume(vol))
202989b17223SAlexander Motin 			error = EBUSY;
203089b17223SAlexander Motin 	}
203189b17223SAlexander Motin 	if (error)
203289b17223SAlexander Motin 		return (error);
203389b17223SAlexander Motin 	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
203489b17223SAlexander Motin 		if (g_raid_destroy_disk(disk))
203589b17223SAlexander Motin 			error = EBUSY;
203689b17223SAlexander Motin 	}
203789b17223SAlexander Motin 	if (error)
203889b17223SAlexander Motin 		return (error);
203989b17223SAlexander Motin 	if (sc->sc_md) {
204089b17223SAlexander Motin 		G_RAID_MD_FREE(sc->sc_md);
204189b17223SAlexander Motin 		kobj_delete((kobj_t)sc->sc_md, M_RAID);
204289b17223SAlexander Motin 		sc->sc_md = NULL;
204389b17223SAlexander Motin 	}
204489b17223SAlexander Motin 	if (sc->sc_geom != NULL) {
204589b17223SAlexander Motin 		G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
204689b17223SAlexander Motin 		g_topology_lock();
204789b17223SAlexander Motin 		sc->sc_geom->softc = NULL;
204889b17223SAlexander Motin 		g_wither_geom(sc->sc_geom, ENXIO);
204989b17223SAlexander Motin 		g_topology_unlock();
205089b17223SAlexander Motin 		sc->sc_geom = NULL;
205189b17223SAlexander Motin 	} else
205289b17223SAlexander Motin 		G_RAID_DEBUG(1, "Array destroyed.");
205389b17223SAlexander Motin 	if (worker) {
205489b17223SAlexander Motin 		g_raid_event_cancel(sc, sc);
205589b17223SAlexander Motin 		mtx_destroy(&sc->sc_queue_mtx);
205689b17223SAlexander Motin 		sx_xunlock(&sc->sc_lock);
205789b17223SAlexander Motin 		sx_destroy(&sc->sc_lock);
205889b17223SAlexander Motin 		wakeup(&sc->sc_stopping);
205989b17223SAlexander Motin 		free(sc, M_RAID);
206089b17223SAlexander Motin 		curthread->td_pflags &= ~TDP_GEOM;
206189b17223SAlexander Motin 		G_RAID_DEBUG(1, "Thread exiting.");
206289b17223SAlexander Motin 		kproc_exit(0);
206389b17223SAlexander Motin 	} else {
206489b17223SAlexander Motin 		/* Wake up worker to make it self-destruct. */
206589b17223SAlexander Motin 		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
206689b17223SAlexander Motin 	}
206789b17223SAlexander Motin 	return (0);
206889b17223SAlexander Motin }
206989b17223SAlexander Motin 
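/*
 * Destroy a single volume.  If the volume is not yet stopped, ask its TR
 * module to stop and return EBUSY; EBUSY is also returned while events
 * are pending or the provider still exists or is open.  Otherwise the TR
 * object, the root mount hold and the subdisk links are released and the
 * volume is freed.
 */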
207089b17223SAlexander Motin int
207189b17223SAlexander Motin g_raid_destroy_volume(struct g_raid_volume *vol)
207289b17223SAlexander Motin {
207389b17223SAlexander Motin 	struct g_raid_softc *sc;
207489b17223SAlexander Motin 	struct g_raid_disk *disk;
207589b17223SAlexander Motin 	int i;
207689b17223SAlexander Motin 
207789b17223SAlexander Motin 	sc = vol->v_softc;
207889b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
207989b17223SAlexander Motin 	vol->v_stopping = 1;
208089b17223SAlexander Motin 	if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
208189b17223SAlexander Motin 		if (vol->v_tr) {
208289b17223SAlexander Motin 			G_RAID_TR_STOP(vol->v_tr);
208389b17223SAlexander Motin 			return (EBUSY);
208489b17223SAlexander Motin 		} else
208589b17223SAlexander Motin 			vol->v_state = G_RAID_VOLUME_S_STOPPED;
208689b17223SAlexander Motin 	}
208789b17223SAlexander Motin 	if (g_raid_event_check(sc, vol) != 0)
208889b17223SAlexander Motin 		return (EBUSY);
208989b17223SAlexander Motin 	if (vol->v_provider != NULL)
209089b17223SAlexander Motin 		return (EBUSY);
209189b17223SAlexander Motin 	if (vol->v_provider_open != 0)
209289b17223SAlexander Motin 		return (EBUSY);
209389b17223SAlexander Motin 	if (vol->v_tr) {
209489b17223SAlexander Motin 		G_RAID_TR_FREE(vol->v_tr);
209589b17223SAlexander Motin 		kobj_delete((kobj_t)vol->v_tr, M_RAID);
209689b17223SAlexander Motin 		vol->v_tr = NULL;
209789b17223SAlexander Motin 	}
209889b17223SAlexander Motin 	if (vol->v_rootmount)
209989b17223SAlexander Motin 		root_mount_rel(vol->v_rootmount);
210089b17223SAlexander Motin 	g_topology_lock();
210189b17223SAlexander Motin 	LIST_REMOVE(vol, v_global_next);
210289b17223SAlexander Motin 	g_topology_unlock();
210389b17223SAlexander Motin 	TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
210489b17223SAlexander Motin 	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
210589b17223SAlexander Motin 		g_raid_event_cancel(sc, &vol->v_subdisks[i]);
210689b17223SAlexander Motin 		disk = vol->v_subdisks[i].sd_disk;
210789b17223SAlexander Motin 		if (disk == NULL)
210889b17223SAlexander Motin 			continue;
210989b17223SAlexander Motin 		TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
211089b17223SAlexander Motin 	}
211189b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
211289b17223SAlexander Motin 	if (sc->sc_md)
211389b17223SAlexander Motin 		G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
211489b17223SAlexander Motin 	g_raid_event_cancel(sc, vol);
211589b17223SAlexander Motin 	free(vol, M_RAID);
211689b17223SAlexander Motin 	if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
211789b17223SAlexander Motin 		/* Wake up worker to let it self-destruct. */
211889b17223SAlexander Motin 		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
211989b17223SAlexander Motin 	}
212089b17223SAlexander Motin 	return (0);
212189b17223SAlexander Motin }
212289b17223SAlexander Motin 
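/*
 * Destroy a disk: drop its consumer, disconnect and detach all of its
 * subdisks, remove it from the node's disk list, let the metadata module
 * release its per-disk state, and free the structure.
 */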
212389b17223SAlexander Motin int
212489b17223SAlexander Motin g_raid_destroy_disk(struct g_raid_disk *disk)
212589b17223SAlexander Motin {
212689b17223SAlexander Motin 	struct g_raid_softc *sc;
212789b17223SAlexander Motin 	struct g_raid_subdisk *sd, *tmp;
212889b17223SAlexander Motin 
212989b17223SAlexander Motin 	sc = disk->d_softc;
213089b17223SAlexander Motin 	G_RAID_DEBUG1(2, sc, "Destroying disk.");
213189b17223SAlexander Motin 	if (disk->d_consumer) {
213289b17223SAlexander Motin 		g_raid_kill_consumer(sc, disk->d_consumer);
213389b17223SAlexander Motin 		disk->d_consumer = NULL;
213489b17223SAlexander Motin 	}
213589b17223SAlexander Motin 	TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
213689b17223SAlexander Motin 		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
213789b17223SAlexander Motin 		g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
213889b17223SAlexander Motin 		    G_RAID_EVENT_SUBDISK);
213989b17223SAlexander Motin 		TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
214089b17223SAlexander Motin 		sd->sd_disk = NULL;
214189b17223SAlexander Motin 	}
214289b17223SAlexander Motin 	TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
214389b17223SAlexander Motin 	if (sc->sc_md)
214489b17223SAlexander Motin 		G_RAID_MD_FREE_DISK(sc->sc_md, disk);
214589b17223SAlexander Motin 	g_raid_event_cancel(sc, disk);
214689b17223SAlexander Motin 	free(disk, M_RAID);
214789b17223SAlexander Motin 	return (0);
214889b17223SAlexander Motin }
214989b17223SAlexander Motin 
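/*
 * Request destruction of the node.  With G_RAID_DESTROY_SOFT the request
 * fails with EBUSY while any volume is still open; G_RAID_DESTROY_DELAYED
 * only marks the node for destruction on last close; G_RAID_DESTROY_HARD
 * proceeds regardless of open volumes.  The caller must enter with
 * sc_lock held; the lock is released on every return path (via PDROP
 * while sleeping for the worker).  A minimal caller sketch, along the
 * lines of g_raid_destroy_geom() below:
 *
 *	sx_xlock(&sc->sc_lock);
 *	error = g_raid_destroy(sc, G_RAID_DESTROY_SOFT);
 *	(sc_lock is already dropped at this point)
 */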
215089b17223SAlexander Motin int
215189b17223SAlexander Motin g_raid_destroy(struct g_raid_softc *sc, int how)
215289b17223SAlexander Motin {
21538531bb3fSAlexander Motin 	int error, opens;
215489b17223SAlexander Motin 
215589b17223SAlexander Motin 	g_topology_assert_not();
215689b17223SAlexander Motin 	if (sc == NULL)
215789b17223SAlexander Motin 		return (ENXIO);
215889b17223SAlexander Motin 	sx_assert(&sc->sc_lock, SX_XLOCKED);
215989b17223SAlexander Motin 
216089b17223SAlexander Motin 	/* Count open volumes. */
216189b17223SAlexander Motin 	opens = g_raid_nopens(sc);
216289b17223SAlexander Motin 
216389b17223SAlexander Motin 	/* React to volumes that are still open. */
216489b17223SAlexander Motin 	if (opens > 0) {
216589b17223SAlexander Motin 		switch (how) {
216689b17223SAlexander Motin 		case G_RAID_DESTROY_SOFT:
216789b17223SAlexander Motin 			G_RAID_DEBUG1(1, sc,
216889b17223SAlexander Motin 			    "%d volumes are still open.",
216989b17223SAlexander Motin 			    opens);
21708531bb3fSAlexander Motin 			sx_xunlock(&sc->sc_lock);
217189b17223SAlexander Motin 			return (EBUSY);
217289b17223SAlexander Motin 		case G_RAID_DESTROY_DELAYED:
217389b17223SAlexander Motin 			G_RAID_DEBUG1(1, sc,
217489b17223SAlexander Motin 			    "Array will be destroyed on last close.");
217589b17223SAlexander Motin 			sc->sc_stopping = G_RAID_DESTROY_DELAYED;
21768531bb3fSAlexander Motin 			sx_xunlock(&sc->sc_lock);
217789b17223SAlexander Motin 			return (EBUSY);
217889b17223SAlexander Motin 		case G_RAID_DESTROY_HARD:
217989b17223SAlexander Motin 			G_RAID_DEBUG1(1, sc,
218089b17223SAlexander Motin 			    "%d volumes are still open.",
218189b17223SAlexander Motin 			    opens);
218289b17223SAlexander Motin 		}
218389b17223SAlexander Motin 	}
218489b17223SAlexander Motin 
218589b17223SAlexander Motin 	/* Mark node for destruction. */
218689b17223SAlexander Motin 	sc->sc_stopping = G_RAID_DESTROY_HARD;
218789b17223SAlexander Motin 	/* Wake up worker to let it self-destruct. */
218889b17223SAlexander Motin 	g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
218989b17223SAlexander Motin 	/* Sleep until node destroyed. */
21908531bb3fSAlexander Motin 	error = sx_sleep(&sc->sc_stopping, &sc->sc_lock,
21918531bb3fSAlexander Motin 	    PRIBIO | PDROP, "r:destroy", hz * 3);
21928531bb3fSAlexander Motin 	return (error == EWOULDBLOCK ? EBUSY : 0);
219389b17223SAlexander Motin }
219489b17223SAlexander Motin 
219589b17223SAlexander Motin static void
219689b17223SAlexander Motin g_raid_taste_orphan(struct g_consumer *cp)
219789b17223SAlexander Motin {
219889b17223SAlexander Motin 
219989b17223SAlexander Motin 	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
220089b17223SAlexander Motin 	    cp->provider->name));
220189b17223SAlexander Motin }
220289b17223SAlexander Motin 
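/*
 * GEOM taste method: create a temporary "raid:taste" geom and consumer,
 * attach to and open the provider, and offer it to every enabled metadata
 * class in priority order until one of them does not fail the taste.
 * The temporary consumer and geom are always destroyed before returning;
 * the geom returned (if any) is the one provided by the metadata class.
 */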
220389b17223SAlexander Motin static struct g_geom *
220489b17223SAlexander Motin g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
220589b17223SAlexander Motin {
220689b17223SAlexander Motin 	struct g_consumer *cp;
220789b17223SAlexander Motin 	struct g_geom *gp, *geom;
220889b17223SAlexander Motin 	struct g_raid_md_class *class;
220989b17223SAlexander Motin 	struct g_raid_md_object *obj;
221089b17223SAlexander Motin 	int status;
221189b17223SAlexander Motin 
221289b17223SAlexander Motin 	g_topology_assert();
221389b17223SAlexander Motin 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2214c89d2fbeSAlexander Motin 	if (!g_raid_enable)
2215c89d2fbeSAlexander Motin 		return (NULL);
221689b17223SAlexander Motin 	G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
221789b17223SAlexander Motin 
2218dea1e226SAlexander Motin 	geom = NULL;
2219dea1e226SAlexander Motin 	status = G_RAID_MD_TASTE_FAIL;
22208df8e26aSAlexander Motin 	gp = g_new_geomf(mp, "raid:taste");
222189b17223SAlexander Motin 	/*
222289b17223SAlexander Motin 	 * This orphan function should be never called.
222389b17223SAlexander Motin 	 * This orphan function should never be called.
222489b17223SAlexander Motin 	gp->orphan = g_raid_taste_orphan;
222589b17223SAlexander Motin 	cp = g_new_consumer(gp);
22260d8cec76SAlexander Motin 	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2227d22ff249SEdward Tomasz Napierala 	if (g_attach(cp, pp) != 0)
2228d22ff249SEdward Tomasz Napierala 		goto ofail2;
2229dea1e226SAlexander Motin 	if (g_access(cp, 1, 0, 0) != 0)
2230dea1e226SAlexander Motin 		goto ofail;
223189b17223SAlexander Motin 
223289b17223SAlexander Motin 	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
2233c89d2fbeSAlexander Motin 		if (!class->mdc_enable)
2234c89d2fbeSAlexander Motin 			continue;
223589b17223SAlexander Motin 		G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
223689b17223SAlexander Motin 		    pp->name, class->name);
223789b17223SAlexander Motin 		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
223889b17223SAlexander Motin 		    M_WAITOK);
223989b17223SAlexander Motin 		obj->mdo_class = class;
224089b17223SAlexander Motin 		status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
224189b17223SAlexander Motin 		if (status != G_RAID_MD_TASTE_NEW)
224289b17223SAlexander Motin 			kobj_delete((kobj_t)obj, M_RAID);
224389b17223SAlexander Motin 		if (status != G_RAID_MD_TASTE_FAIL)
224489b17223SAlexander Motin 			break;
224589b17223SAlexander Motin 	}
224689b17223SAlexander Motin 
2247dea1e226SAlexander Motin 	if (status == G_RAID_MD_TASTE_FAIL)
2248dea1e226SAlexander Motin 		(void)g_access(cp, -1, 0, 0);
2249dea1e226SAlexander Motin ofail:
225089b17223SAlexander Motin 	g_detach(cp);
2251d22ff249SEdward Tomasz Napierala ofail2:
225289b17223SAlexander Motin 	g_destroy_consumer(cp);
225389b17223SAlexander Motin 	g_destroy_geom(gp);
225489b17223SAlexander Motin 	G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
225589b17223SAlexander Motin 	return (geom);
225689b17223SAlexander Motin }
225789b17223SAlexander Motin 
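/*
 * Create a new array node for the requested metadata format: find the
 * metadata class whose name matches (case-insensitively), instantiate it
 * and hand the gctl request to its create method.  The object is freed
 * again unless the class reports G_RAID_MD_TASTE_NEW.
 */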
225889b17223SAlexander Motin int
22598df8e26aSAlexander Motin g_raid_create_node_format(const char *format, struct gctl_req *req,
22608df8e26aSAlexander Motin     struct g_geom **gp)
226189b17223SAlexander Motin {
226289b17223SAlexander Motin 	struct g_raid_md_class *class;
226389b17223SAlexander Motin 	struct g_raid_md_object *obj;
226489b17223SAlexander Motin 	int status;
226589b17223SAlexander Motin 
226689b17223SAlexander Motin 	G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
226789b17223SAlexander Motin 	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
226889b17223SAlexander Motin 		if (strcasecmp(class->name, format) == 0)
226989b17223SAlexander Motin 			break;
227089b17223SAlexander Motin 	}
227189b17223SAlexander Motin 	if (class == NULL) {
227289b17223SAlexander Motin 		G_RAID_DEBUG(1, "No support for %s metadata.", format);
227389b17223SAlexander Motin 		return (G_RAID_MD_TASTE_FAIL);
227489b17223SAlexander Motin 	}
227589b17223SAlexander Motin 	obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
227689b17223SAlexander Motin 	    M_WAITOK);
227789b17223SAlexander Motin 	obj->mdo_class = class;
22788df8e26aSAlexander Motin 	status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
227989b17223SAlexander Motin 	if (status != G_RAID_MD_TASTE_NEW)
228089b17223SAlexander Motin 		kobj_delete((kobj_t)obj, M_RAID);
228189b17223SAlexander Motin 	return (status);
228289b17223SAlexander Motin }
228389b17223SAlexander Motin 
228489b17223SAlexander Motin static int
228589b17223SAlexander Motin g_raid_destroy_geom(struct gctl_req *req __unused,
228689b17223SAlexander Motin     struct g_class *mp __unused, struct g_geom *gp)
228789b17223SAlexander Motin {
228889b17223SAlexander Motin 	struct g_raid_softc *sc;
228989b17223SAlexander Motin 	int error;
229089b17223SAlexander Motin 
229189b17223SAlexander Motin 	g_topology_unlock();
229289b17223SAlexander Motin 	sc = gp->softc;
229389b17223SAlexander Motin 	sx_xlock(&sc->sc_lock);
229489b17223SAlexander Motin 	g_cancel_event(sc);
229589b17223SAlexander Motin 	error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
229689b17223SAlexander Motin 	g_topology_lock();
229789b17223SAlexander Motin 	return (error);
229889b17223SAlexander Motin }
229989b17223SAlexander Motin 
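/*
 * Ask the metadata module to write updated on-disk metadata for the given
 * volume, subdisk and/or disk.  Skipped entirely while the node is being
 * hard-destroyed.
 */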
230089b17223SAlexander Motin void g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
230189b17223SAlexander Motin     struct g_raid_subdisk *sd, struct g_raid_disk *disk)
230289b17223SAlexander Motin {
230389b17223SAlexander Motin 
230489b17223SAlexander Motin 	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
230589b17223SAlexander Motin 		return;
230689b17223SAlexander Motin 	if (sc->sc_md)
230789b17223SAlexander Motin 		G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
230889b17223SAlexander Motin }
230989b17223SAlexander Motin 
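/*
 * Mark a disk as failed via the metadata module.  The disk may be given
 * explicitly or derived from the subdisk; requests for absent disks or
 * disks that are not ACTIVE are rejected with a warning.
 */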
231089b17223SAlexander Motin void g_raid_fail_disk(struct g_raid_softc *sc,
231189b17223SAlexander Motin     struct g_raid_subdisk *sd, struct g_raid_disk *disk)
231289b17223SAlexander Motin {
231389b17223SAlexander Motin 
231489b17223SAlexander Motin 	if (disk == NULL)
231589b17223SAlexander Motin 		disk = sd->sd_disk;
231689b17223SAlexander Motin 	if (disk == NULL) {
231789b17223SAlexander Motin 		G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
231889b17223SAlexander Motin 		return;
231989b17223SAlexander Motin 	}
232089b17223SAlexander Motin 	if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
232189b17223SAlexander Motin 		G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a "
232289b17223SAlexander Motin 		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
232389b17223SAlexander Motin 		return;
232489b17223SAlexander Motin 	}
232589b17223SAlexander Motin 	if (sc->sc_md)
232689b17223SAlexander Motin 		G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
232789b17223SAlexander Motin }
232889b17223SAlexander Motin 
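/*
 * GEOM dumpconf method.  For a provider it reports per-volume data
 * (label, RAID level, transformation, component count, strip size, state,
 * dirtiness and subdisk states); for a consumer it reports the disk
 * state, its subdisks and read error count; for the geom itself it
 * reports the metadata format and an aggregate state computed as the
 * minimum of all volume states.
 */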
232989b17223SAlexander Motin static void
233089b17223SAlexander Motin g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
233189b17223SAlexander Motin     struct g_consumer *cp, struct g_provider *pp)
233289b17223SAlexander Motin {
233389b17223SAlexander Motin 	struct g_raid_softc *sc;
233489b17223SAlexander Motin 	struct g_raid_volume *vol;
233589b17223SAlexander Motin 	struct g_raid_subdisk *sd;
233689b17223SAlexander Motin 	struct g_raid_disk *disk;
233789b17223SAlexander Motin 	int i, s;
233889b17223SAlexander Motin 
233989b17223SAlexander Motin 	g_topology_assert();
234089b17223SAlexander Motin 
234189b17223SAlexander Motin 	sc = gp->softc;
234289b17223SAlexander Motin 	if (sc == NULL)
234389b17223SAlexander Motin 		return;
234489b17223SAlexander Motin 	if (pp != NULL) {
234589b17223SAlexander Motin 		vol = pp->private;
234689b17223SAlexander Motin 		g_topology_unlock();
234789b17223SAlexander Motin 		sx_xlock(&sc->sc_lock);
2348bcb6ad36SAlexander Motin 		sbuf_printf(sb, "%s<descr>%s %s volume</descr>\n", indent,
2349bcb6ad36SAlexander Motin 		    sc->sc_md->mdo_class->name,
2350bcb6ad36SAlexander Motin 		    g_raid_volume_level2str(vol->v_raid_level,
2351bcb6ad36SAlexander Motin 		    vol->v_raid_level_qualifier));
235289b17223SAlexander Motin 		sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
235389b17223SAlexander Motin 		    vol->v_name);
235489b17223SAlexander Motin 		sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
235589b17223SAlexander Motin 		    g_raid_volume_level2str(vol->v_raid_level,
235689b17223SAlexander Motin 		    vol->v_raid_level_qualifier));
235789b17223SAlexander Motin 		sbuf_printf(sb,
235889b17223SAlexander Motin 		    "%s<Transformation>%s</Transformation>\n", indent,
235989b17223SAlexander Motin 		    vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
236089b17223SAlexander Motin 		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
236189b17223SAlexander Motin 		    vol->v_disks_count);
236289b17223SAlexander Motin 		sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
236389b17223SAlexander Motin 		    vol->v_strip_size);
236489b17223SAlexander Motin 		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
236589b17223SAlexander Motin 		    g_raid_volume_state2str(vol->v_state));
236689b17223SAlexander Motin 		sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
236789b17223SAlexander Motin 		    vol->v_dirty ? "Yes" : "No");
236889b17223SAlexander Motin 		sbuf_printf(sb, "%s<Subdisks>", indent);
236989b17223SAlexander Motin 		for (i = 0; i < vol->v_disks_count; i++) {
237089b17223SAlexander Motin 			sd = &vol->v_subdisks[i];
237189b17223SAlexander Motin 			if (sd->sd_disk != NULL &&
237289b17223SAlexander Motin 			    sd->sd_disk->d_consumer != NULL) {
237389b17223SAlexander Motin 				sbuf_printf(sb, "%s ",
237489b17223SAlexander Motin 				    g_raid_get_diskname(sd->sd_disk));
237589b17223SAlexander Motin 			} else {
237649ee0fceSAlexander Motin 				sbuf_cat(sb, "NONE ");
237789b17223SAlexander Motin 			}
237889b17223SAlexander Motin 			sbuf_printf(sb, "(%s",
237989b17223SAlexander Motin 			    g_raid_subdisk_state2str(sd->sd_state));
238089b17223SAlexander Motin 			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
238189b17223SAlexander Motin 			    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
238289b17223SAlexander Motin 				sbuf_printf(sb, " %d%%",
238389b17223SAlexander Motin 				    (int)(sd->sd_rebuild_pos * 100 /
238489b17223SAlexander Motin 				     sd->sd_size));
238589b17223SAlexander Motin 			}
238649ee0fceSAlexander Motin 			sbuf_cat(sb, ")");
238789b17223SAlexander Motin 			if (i + 1 < vol->v_disks_count)
238849ee0fceSAlexander Motin 				sbuf_cat(sb, ", ");
238989b17223SAlexander Motin 		}
239049ee0fceSAlexander Motin 		sbuf_cat(sb, "</Subdisks>\n");
239189b17223SAlexander Motin 		sx_xunlock(&sc->sc_lock);
239289b17223SAlexander Motin 		g_topology_lock();
239389b17223SAlexander Motin 	} else if (cp != NULL) {
239489b17223SAlexander Motin 		disk = cp->private;
239589b17223SAlexander Motin 		if (disk == NULL)
239689b17223SAlexander Motin 			return;
239789b17223SAlexander Motin 		g_topology_unlock();
239889b17223SAlexander Motin 		sx_xlock(&sc->sc_lock);
239989b17223SAlexander Motin 		sbuf_printf(sb, "%s<State>%s", indent,
240089b17223SAlexander Motin 		    g_raid_disk_state2str(disk->d_state));
240189b17223SAlexander Motin 		if (!TAILQ_EMPTY(&disk->d_subdisks)) {
240249ee0fceSAlexander Motin 			sbuf_cat(sb, " (");
240389b17223SAlexander Motin 			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
240489b17223SAlexander Motin 				sbuf_printf(sb, "%s",
240589b17223SAlexander Motin 				    g_raid_subdisk_state2str(sd->sd_state));
240689b17223SAlexander Motin 				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
240789b17223SAlexander Motin 				    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
240889b17223SAlexander Motin 					sbuf_printf(sb, " %d%%",
240989b17223SAlexander Motin 					    (int)(sd->sd_rebuild_pos * 100 /
241089b17223SAlexander Motin 					     sd->sd_size));
241189b17223SAlexander Motin 				}
241289b17223SAlexander Motin 				if (TAILQ_NEXT(sd, sd_next))
241349ee0fceSAlexander Motin 					sbuf_cat(sb, ", ");
241489b17223SAlexander Motin 			}
241549ee0fceSAlexander Motin 			sbuf_cat(sb, ")");
241689b17223SAlexander Motin 		}
241749ee0fceSAlexander Motin 		sbuf_cat(sb, "</State>\n");
241889b17223SAlexander Motin 		sbuf_printf(sb, "%s<Subdisks>", indent);
241989b17223SAlexander Motin 		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
242089b17223SAlexander Motin 			sbuf_printf(sb, "r%d(%s):%d@%ju",
242189b17223SAlexander Motin 			    sd->sd_volume->v_global_id,
242289b17223SAlexander Motin 			    sd->sd_volume->v_name,
24236d305ab0SEugene Grosbein 			    sd->sd_pos, (uintmax_t)sd->sd_offset);
242489b17223SAlexander Motin 			if (TAILQ_NEXT(sd, sd_next))
242549ee0fceSAlexander Motin 				sbuf_cat(sb, ", ");
242689b17223SAlexander Motin 		}
242749ee0fceSAlexander Motin 		sbuf_cat(sb, "</Subdisks>\n");
242889b17223SAlexander Motin 		sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
242989b17223SAlexander Motin 		    disk->d_read_errs);
243089b17223SAlexander Motin 		sx_xunlock(&sc->sc_lock);
243189b17223SAlexander Motin 		g_topology_lock();
243289b17223SAlexander Motin 	} else {
243389b17223SAlexander Motin 		g_topology_unlock();
243489b17223SAlexander Motin 		sx_xlock(&sc->sc_lock);
243589b17223SAlexander Motin 		if (sc->sc_md) {
243689b17223SAlexander Motin 			sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
243789b17223SAlexander Motin 			    sc->sc_md->mdo_class->name);
243889b17223SAlexander Motin 		}
243989b17223SAlexander Motin 		if (!TAILQ_EMPTY(&sc->sc_volumes)) {
244089b17223SAlexander Motin 			s = 0xff;
244189b17223SAlexander Motin 			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
244289b17223SAlexander Motin 				if (vol->v_state < s)
244389b17223SAlexander Motin 					s = vol->v_state;
244489b17223SAlexander Motin 			}
244589b17223SAlexander Motin 			sbuf_printf(sb, "%s<State>%s</State>\n", indent,
244689b17223SAlexander Motin 			    g_raid_volume_state2str(s));
244789b17223SAlexander Motin 		}
244889b17223SAlexander Motin 		sx_xunlock(&sc->sc_lock);
244989b17223SAlexander Motin 		g_topology_lock();
245089b17223SAlexander Motin 	}
245189b17223SAlexander Motin }
245289b17223SAlexander Motin 
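/*
 * shutdown_post_sync handler: on a regular (syncing) shutdown, mark all
 * volumes clean and schedule every array for delayed destruction.
 * Skipped when the system goes down with RB_NOSYNC.
 */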
245389b17223SAlexander Motin static void
2454a479c51bSAlexander Motin g_raid_shutdown_post_sync(void *arg, int howto)
245589b17223SAlexander Motin {
245689b17223SAlexander Motin 	struct g_class *mp;
245789b17223SAlexander Motin 	struct g_geom *gp, *gp2;
245889b17223SAlexander Motin 	struct g_raid_softc *sc;
2459a479c51bSAlexander Motin 	struct g_raid_volume *vol;
246089b17223SAlexander Motin 
24614eb861d3SMitchell Horne 	if ((howto & RB_NOSYNC) != 0)
24624eb861d3SMitchell Horne 		return;
24634eb861d3SMitchell Horne 
246489b17223SAlexander Motin 	mp = arg;
246589b17223SAlexander Motin 	g_topology_lock();
2466a479c51bSAlexander Motin 	g_raid_shutdown = 1;
246789b17223SAlexander Motin 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
246889b17223SAlexander Motin 		if ((sc = gp->softc) == NULL)
246989b17223SAlexander Motin 			continue;
247089b17223SAlexander Motin 		g_topology_unlock();
247189b17223SAlexander Motin 		sx_xlock(&sc->sc_lock);
2472a479c51bSAlexander Motin 		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
2473a479c51bSAlexander Motin 			g_raid_clean(vol, -1);
247489b17223SAlexander Motin 		g_cancel_event(sc);
24758531bb3fSAlexander Motin 		g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
247689b17223SAlexander Motin 		g_topology_lock();
247789b17223SAlexander Motin 	}
247889b17223SAlexander Motin 	g_topology_unlock();
247989b17223SAlexander Motin }
248089b17223SAlexander Motin 
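/*
 * Class init/fini: register (and later deregister) the shutdown_post_sync
 * event handler and track whether the class has been started.
 */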
248189b17223SAlexander Motin static void
248289b17223SAlexander Motin g_raid_init(struct g_class *mp)
248389b17223SAlexander Motin {
248489b17223SAlexander Motin 
2485a479c51bSAlexander Motin 	g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
2486a479c51bSAlexander Motin 	    g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
2487a479c51bSAlexander Motin 	if (g_raid_post_sync == NULL)
248889b17223SAlexander Motin 		G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
248989b17223SAlexander Motin 	g_raid_started = 1;
249089b17223SAlexander Motin }
249189b17223SAlexander Motin 
249289b17223SAlexander Motin static void
249389b17223SAlexander Motin g_raid_fini(struct g_class *mp)
249489b17223SAlexander Motin {
249589b17223SAlexander Motin 
2496a479c51bSAlexander Motin 	if (g_raid_post_sync != NULL)
2497a479c51bSAlexander Motin 		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync);
249889b17223SAlexander Motin 	g_raid_started = 0;
249989b17223SAlexander Motin }
250089b17223SAlexander Motin 
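/*
 * Module event handler for metadata (MD) submodules: on load, insert the
 * class into g_raid_md_classes keeping the list sorted by ascending
 * mdc_priority and retaste existing providers if the core class is
 * already started; on unload, simply remove it from the list.
 */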
250189b17223SAlexander Motin int
250289b17223SAlexander Motin g_raid_md_modevent(module_t mod, int type, void *arg)
250389b17223SAlexander Motin {
250489b17223SAlexander Motin 	struct g_raid_md_class *class, *c, *nc;
250589b17223SAlexander Motin 	int error;
250689b17223SAlexander Motin 
250789b17223SAlexander Motin 	error = 0;
250889b17223SAlexander Motin 	class = arg;
250989b17223SAlexander Motin 	switch (type) {
251089b17223SAlexander Motin 	case MOD_LOAD:
251189b17223SAlexander Motin 		c = LIST_FIRST(&g_raid_md_classes);
251289b17223SAlexander Motin 		if (c == NULL || c->mdc_priority > class->mdc_priority)
251389b17223SAlexander Motin 			LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
251489b17223SAlexander Motin 		else {
251589b17223SAlexander Motin 			while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
251689b17223SAlexander Motin 			    nc->mdc_priority < class->mdc_priority)
251789b17223SAlexander Motin 				c = nc;
251889b17223SAlexander Motin 			LIST_INSERT_AFTER(c, class, mdc_list);
251989b17223SAlexander Motin 		}
252089b17223SAlexander Motin 		if (g_raid_started)
252189b17223SAlexander Motin 			g_retaste(&g_raid_class);
252289b17223SAlexander Motin 		break;
252389b17223SAlexander Motin 	case MOD_UNLOAD:
252489b17223SAlexander Motin 		LIST_REMOVE(class, mdc_list);
252589b17223SAlexander Motin 		break;
252689b17223SAlexander Motin 	default:
252789b17223SAlexander Motin 		error = EOPNOTSUPP;
252889b17223SAlexander Motin 		break;
252989b17223SAlexander Motin 	}
253089b17223SAlexander Motin 
253189b17223SAlexander Motin 	return (error);
253289b17223SAlexander Motin }
253389b17223SAlexander Motin 
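/*
 * Module event handler for transformation (TR) submodules: the same
 * priority-sorted insertion as above, but into g_raid_tr_classes and
 * without a retaste.
 */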
253489b17223SAlexander Motin int
253589b17223SAlexander Motin g_raid_tr_modevent(module_t mod, int type, void *arg)
253689b17223SAlexander Motin {
253789b17223SAlexander Motin 	struct g_raid_tr_class *class, *c, *nc;
253889b17223SAlexander Motin 	int error;
253989b17223SAlexander Motin 
254089b17223SAlexander Motin 	error = 0;
254189b17223SAlexander Motin 	class = arg;
254289b17223SAlexander Motin 	switch (type) {
254389b17223SAlexander Motin 	case MOD_LOAD:
254489b17223SAlexander Motin 		c = LIST_FIRST(&g_raid_tr_classes);
254589b17223SAlexander Motin 		if (c == NULL || c->trc_priority > class->trc_priority)
254689b17223SAlexander Motin 			LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
254789b17223SAlexander Motin 		else {
254889b17223SAlexander Motin 			while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
254989b17223SAlexander Motin 			    nc->trc_priority < class->trc_priority)
255089b17223SAlexander Motin 				c = nc;
255189b17223SAlexander Motin 			LIST_INSERT_AFTER(c, class, trc_list);
255289b17223SAlexander Motin 		}
255389b17223SAlexander Motin 		break;
255489b17223SAlexander Motin 	case MOD_UNLOAD:
255589b17223SAlexander Motin 		LIST_REMOVE(class, trc_list);
255689b17223SAlexander Motin 		break;
255789b17223SAlexander Motin 	default:
255889b17223SAlexander Motin 		error = EOPNOTSUPP;
255989b17223SAlexander Motin 		break;
256089b17223SAlexander Motin 	}
256189b17223SAlexander Motin 
256289b17223SAlexander Motin 	return (error);
256389b17223SAlexander Motin }
256489b17223SAlexander Motin 
256589b17223SAlexander Motin /*
256689b17223SAlexander Motin  * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
256789b17223SAlexander Motin  * Use a local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
256889b17223SAlexander Motin  * to reduce module priority, allowing submodules to register themselves first.
256989b17223SAlexander Motin static moduledata_t g_raid_mod = {
257089b17223SAlexander Motin 	"g_raid",
257189b17223SAlexander Motin 	g_modevent,
257289b17223SAlexander Motin 	&g_raid_class
257389b17223SAlexander Motin };
257489b17223SAlexander Motin DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
257589b17223SAlexander Motin MODULE_VERSION(geom_raid, 0);
2576