xref: /dragonfly/sys/dev/disk/dm/dm_table.c (revision 8477f730)
1 /*        $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $      */
2 
3 /*
4  * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
5  * Copyright (c) 2008 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Adam Hamsik.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <cpu/atomic.h>
36 #include <dev/disk/dm/dm.h>
37 
38 /*
39  * There are two types of users of this interface:
40  *
41  * a) Readers such as
42  *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
43  *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
44  *
45  * b) Writers such as
46  *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
47  *
48  * Writers can work with table_head only when there are no readers. We
49  * simply use shared/exclusive locking to ensure this.
50  */
51 
/*
 * Increment the table user reference counter and return the id of the
 * requested table.
 * DM_TABLE_ACTIVE returns the active table id.
 * DM_TABLE_INACTIVE returns the inactive table id.
 */
58 static int
dm_table_busy(dm_table_head_t * head,uint8_t table_id)59 dm_table_busy(dm_table_head_t *head, uint8_t table_id)
60 {
61 	uint8_t id;
62 
63 	id = 0;
64 
65 	lockmgr(&head->table_mtx, LK_SHARED);
66 
67 	if (table_id == DM_TABLE_ACTIVE)
68 		id = head->cur_active_table;
69 	else
70 		id = 1 - head->cur_active_table;
71 
72 	atomic_add_int(&head->io_cnt, 1);
73 
74 	return id;
75 }
76 
/*
 * Release the table lock, thereby waking up any waiters.
 */
static void
dm_table_unbusy(dm_table_head_t *head)
{
	/* Callers must have taken a reference via dm_table_busy(). */
	KKASSERT(head->io_cnt != 0);

	/* Drop this reader's I/O reference... */
	atomic_subtract_int(&head->io_cnt, 1);

	/* ...then release the shared hold taken by dm_table_busy(). */
	lockmgr(&head->table_mtx, LK_RELEASE);
}
89 
90 /*
91  * Return current active table to caller, increment io_cnt reference counter.
92  */
93 dm_table_t *
dm_table_get_entry(dm_table_head_t * head,uint8_t table_id)94 dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
95 {
96 	uint8_t id;
97 
98 	id = dm_table_busy(head, table_id);
99 
100 	return &head->tables[id];
101 }
102 
103 /*
104  * Decrement io reference counter and release shared lock.
105  */
void
dm_table_release(dm_table_head_t *head, uint8_t table_id)
{
	/*
	 * table_id is unused: dm_table_unbusy() drops the I/O reference
	 * and shared lock regardless of which table was obtained.
	 */
	dm_table_unbusy(head);
}
111 
112 /*
113  * Switch table from inactive to active mode. Have to wait until io_cnt is 0.
114  */
115 void
dm_table_switch_tables(dm_table_head_t * head)116 dm_table_switch_tables(dm_table_head_t *head)
117 {
118 	lockmgr(&head->table_mtx, LK_EXCLUSIVE);
119 
120 	head->cur_active_table = 1 - head->cur_active_table;
121 
122 	lockmgr(&head->table_mtx, LK_RELEASE);
123 }
124 
/*
 * Destroy all data in the selected table.  The exclusive table lock
 * guarantees this runs only when there are no readers on the table lists.
 */
129 int
dm_table_destroy(dm_table_head_t * head,uint8_t table_id)130 dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
131 {
132 	dm_table_t *tbl;
133 	dm_table_entry_t *table_en;
134 	uint8_t id;
135 
136 	lockmgr(&head->table_mtx, LK_EXCLUSIVE);
137 
138 	dmdebug("table_id=%d io_cnt=%d\n", table_id, head->io_cnt);
139 
140 	if (table_id == DM_TABLE_ACTIVE)
141 		id = head->cur_active_table;
142 	else
143 		id = 1 - head->cur_active_table;
144 
145 	tbl = &head->tables[id];
146 
147 	while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
148 		TAILQ_REMOVE(tbl, table_en, next);
149 
150 		if (table_en->target->destroy)
151 			table_en->target->destroy(table_en);
152 		table_en->target_config = NULL;
153 
154 		dm_table_free_deps(table_en);
155 
156 		/* decrement the refcount for the target */
157 		dm_target_unbusy(table_en->target);
158 
159 		kfree(table_en, M_DM);
160 	}
161 	KKASSERT(TAILQ_EMPTY(tbl));
162 
163 	lockmgr(&head->table_mtx, LK_RELEASE);
164 
165 	return 0;
166 }
167 
168 /*
169  * Return length of active or inactive table in device.
170  */
171 static uint64_t
_dm_table_size(dm_table_head_t * head,int table)172 _dm_table_size(dm_table_head_t *head, int table)
173 {
174 	dm_table_t *tbl;
175 	dm_table_entry_t *table_en;
176 	uint64_t length;
177 
178 	length = 0;
179 
180 	/* Select active table */
181 	tbl = dm_table_get_entry(head, table);
182 
183 	/*
184 	 * Find out what tables I want to select.
185 	 * if length => rawblkno then we should used that table.
186 	 */
187 	TAILQ_FOREACH(table_en, tbl, next) {
188 		length += table_en->length;
189 	}
190 
191 	dm_table_unbusy(head);
192 
193 	return length;
194 }
195 
uint64_t
dm_table_size(dm_table_head_t *head)
{
	/* Total length (sum of entry lengths) of the active table. */
	return _dm_table_size(head, DM_TABLE_ACTIVE);
}
201 
uint64_t
dm_inactive_table_size(dm_table_head_t *head)
{
	/* Total length (sum of entry lengths) of the inactive table. */
	return _dm_table_size(head, DM_TABLE_INACTIVE);
}
207 
/*
 * Return the number of entries in the table (> 0), or 0 if the table is
 * empty.  The count may already be stale by the time userspace receives
 * it (a dm_dev_resume_ioctl can intervene after we return), so it is
 * informative only.
 */
214 int
dm_table_get_target_count(dm_table_head_t * head,uint8_t table_id)215 dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
216 {
217 	dm_table_entry_t *table_en;
218 	dm_table_t *tbl;
219 	uint32_t target_count;
220 
221 	target_count = 0;
222 
223 	tbl = dm_table_get_entry(head, table_id);
224 
225 	TAILQ_FOREACH(table_en, tbl, next)
226 	    target_count++;
227 
228 	dm_table_unbusy(head);
229 
230 	return target_count;
231 }
232 
233 /*
234  * Initialize dm_table_head_t structures, I'm trying to keep this structure as
235  * opaque as possible.
236  */
237 void
dm_table_head_init(dm_table_head_t * head)238 dm_table_head_init(dm_table_head_t *head)
239 {
240 	head->cur_active_table = 0;
241 	head->io_cnt = 0;
242 
243 	/* Initialize tables. */
244 	TAILQ_INIT(&head->tables[0]);
245 	TAILQ_INIT(&head->tables[1]);
246 
247 	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
248 }
249 
250 /*
251  * Destroy all variables in table_head
252  */
void
dm_table_head_destroy(dm_table_head_t *head)
{
	/* Nobody may hold or be waiting on the lock we are about to kill. */
	KKASSERT(!lockinuse(&head->table_mtx));

	/* tables don't exist when I call this routine, therefore it
	 * doesn't make sense to have io_cnt != 0 */
	KKASSERT(head->io_cnt == 0);

	lockuninit(&head->table_mtx);
}
264 
/*
 * Attach the target's private configuration data to a table entry.
 * Ownership note: the config is released via target->destroy() in
 * dm_table_destroy(); this function only stores the pointer.
 */
void
dm_table_init_target(dm_table_entry_t *table_en, void *cfg)
{
	table_en->target_config = cfg;
}
270 
271 int
dm_table_add_deps(dm_table_entry_t * table_en,dm_pdev_t * pdev)272 dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
273 {
274 	dm_table_head_t *head;
275 	dm_mapping_t *map;
276 
277 	KKASSERT(pdev);
278 
279 	head = &table_en->dev->table_head;
280 	lockmgr(&head->table_mtx, LK_SHARED);
281 
282 	TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
283 		if (map->data.pdev->udev == pdev->udev) {
284 			lockmgr(&head->table_mtx, LK_RELEASE);
285 			return -1;
286 		}
287 	}
288 
289 	map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
290 	map->data.pdev = pdev;
291 	TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);
292 
293 	lockmgr(&head->table_mtx, LK_RELEASE);
294 
295 	return 0;
296 }
297 
/*
 * Free every pdev dependency mapping attached to table_en.
 */
void
dm_table_free_deps(dm_table_entry_t *table_en)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	head = &table_en->dev->table_head;
	/*
	 * NOTE(review): the lock is taken LK_SHARED even though the list
	 * is mutated below, and dm_table_destroy() calls this while
	 * already holding the same lock LK_EXCLUSIVE (it is created with
	 * LK_CANRECURSE).  Presumably callers are otherwise serialized;
	 * verify before changing the lock mode.
	 */
	lockmgr(&head->table_mtx, LK_SHARED);

	/* Unlink and free each mapping until the list is empty. */
	while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
		TAILQ_REMOVE(&table_en->pdev_maps, map, next);
		kfree(map, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));

	lockmgr(&head->table_mtx, LK_RELEASE);
}
315