xref: /dragonfly/sys/dev/disk/dm/dm_table.c (revision 7bcb6caf)
1 /*        $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $      */
2 
3 /*
4  * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
5  * Copyright (c) 2008 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Adam Hamsik.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/malloc.h>
34 #include <cpu/atomic.h>
35 #include <dev/disk/dm/dm.h>
36 
37 /*
38  * There are two types of users of this interface:
39  *
40  * a) Readers such as
41  *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
42  *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
43  *
44  * b) Writers such as
45  *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
46  *
47  * Writers can work with table_head only when there are no readers. We
48  * simply use shared/exclusive locking to ensure this.
49  */
50 
51 /*
52  * Function to increment table user reference counter. Return id
53  * of table_id table.
54  * DM_TABLE_ACTIVE will return active table id.
55  * DM_TABLE_INACTIVE will return inactive table id.
56  */
57 static int
58 dm_table_busy(dm_table_head_t *head, uint8_t table_id)
59 {
60 	uint8_t id;
61 
62 	id = 0;
63 
64 	lockmgr(&head->table_mtx, LK_SHARED);
65 
66 	if (table_id == DM_TABLE_ACTIVE)
67 		id = head->cur_active_table;
68 	else
69 		id = 1 - head->cur_active_table;
70 
71 	atomic_add_int(&head->io_cnt, 1);
72 
73 	return id;
74 }
75 
/*
 * Drop one I/O reference and release the shared table lock taken by
 * dm_table_busy().  Releasing the lock allows a blocked exclusive
 * acquirer (a writer) to proceed once all shared holders are gone.
 */
static void
dm_table_unbusy(dm_table_head_t *head)
{
	/* An unbusy without a matching busy is a refcount bug. */
	KKASSERT(head->io_cnt != 0);

	atomic_subtract_int(&head->io_cnt, 1);

	lockmgr(&head->table_mtx, LK_RELEASE);
}
88 
89 /*
90  * Return current active table to caller, increment io_cnt reference counter.
91  */
92 dm_table_t *
93 dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
94 {
95 	uint8_t id;
96 
97 	id = dm_table_busy(head, table_id);
98 
99 	return &head->tables[id];
100 }
101 
/*
 * Decrement io reference counter and release shared lock.
 * Counterpart to dm_table_get_entry().  table_id is unused: the lock
 * and reference count live in the head, not in a per-table slot, so
 * releasing is the same for either table.
 */
void
dm_table_release(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_unbusy(head);
}
110 
111 /*
112  * Switch table from inactive to active mode. Have to wait until io_cnt is 0.
113  */
114 void
115 dm_table_switch_tables(dm_table_head_t *head)
116 {
117 	lockmgr(&head->table_mtx, LK_EXCLUSIVE);
118 
119 	head->cur_active_table = 1 - head->cur_active_table;
120 
121 	lockmgr(&head->table_mtx, LK_RELEASE);
122 }
123 
/*
 * Destroy all table data. This function can run when there are no
 * readers on table lists.
 *
 * Tears down every entry of the selected (active or inactive) table:
 * runs the per-target destroy hook, frees the pdev dependency list,
 * drops the target reference taken at load time and frees the entry
 * itself.  The exclusive lock acquisition blocks until all readers
 * have drained.  Note that dm_table_free_deps() re-acquires the same
 * lock in shared mode while we hold it exclusively; this works only
 * because the lock was created with LK_CANRECURSE.
 *
 * Always returns 0.
 */
int
dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint8_t id;

	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	dmdebug("table_id=%d io_cnt=%d\n", table_id, head->io_cnt);

	/* Resolve the symbolic table id to a slot index. */
	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	tbl = &head->tables[id];

	while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
		TAILQ_REMOVE(tbl, table_en, next);

		/* Let the target release its private state first. */
		if (table_en->target->destroy)
			table_en->target->destroy(table_en);
		table_en->target_config = NULL;

		dm_table_free_deps(table_en);

		/* decrement the refcount for the target */
		dm_target_unbusy(table_en->target);

		kfree(table_en, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(tbl));

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
166 
167 /*
168  * Return length of active or inactive table in device.
169  */
170 static uint64_t
171 _dm_table_size(dm_table_head_t *head, int table)
172 {
173 	dm_table_t *tbl;
174 	dm_table_entry_t *table_en;
175 	uint64_t length;
176 
177 	length = 0;
178 
179 	/* Select active table */
180 	tbl = dm_table_get_entry(head, table);
181 
182 	/*
183 	 * Find out what tables I want to select.
184 	 * if length => rawblkno then we should used that table.
185 	 */
186 	TAILQ_FOREACH(table_en, tbl, next) {
187 		length += table_en->length;
188 	}
189 
190 	dm_table_unbusy(head);
191 
192 	return length;
193 }
194 
/*
 * Return the total length (sum of entry lengths) of the active table.
 */
uint64_t
dm_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_ACTIVE);
}
200 
/*
 * Return the total length (sum of entry lengths) of the inactive table.
 */
uint64_t
dm_inactive_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_INACTIVE);
}
206 
207 /*
208  * Return > 0 if table is at least one table entry (returns number of entries)
209  * and return 0 if there is not. Target count returned from this function
210  * doesn't need to be true when userspace user receive it (after return
211  * there can be dm_dev_resume_ioctl), therefore this is only informative.
212  */
213 int
214 dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
215 {
216 	dm_table_entry_t *table_en;
217 	dm_table_t *tbl;
218 	uint32_t target_count;
219 
220 	target_count = 0;
221 
222 	tbl = dm_table_get_entry(head, table_id);
223 
224 	TAILQ_FOREACH(table_en, tbl, next)
225 	    target_count++;
226 
227 	dm_table_unbusy(head);
228 
229 	return target_count;
230 }
231 
232 /*
233  * Initialize dm_table_head_t structures, I'm trying to keep this structure as
234  * opaque as possible.
235  */
236 void
237 dm_table_head_init(dm_table_head_t *head)
238 {
239 	head->cur_active_table = 0;
240 	head->io_cnt = 0;
241 
242 	/* Initialize tables. */
243 	TAILQ_INIT(&head->tables[0]);
244 	TAILQ_INIT(&head->tables[1]);
245 
246 	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
247 }
248 
/*
 * Tear down a dm_table_head_t.  Both tables must already have been
 * destroyed and no reader may still hold the lock or an I/O
 * reference when this is called.
 */
void
dm_table_head_destroy(dm_table_head_t *head)
{
	KKASSERT(!lockinuse(&head->table_mtx));

	/* tables don't exist when I call this routine, therefore it
	 * doesn't make sense to have io_cnt != 0 */
	KKASSERT(head->io_cnt == 0);

	lockuninit(&head->table_mtx);
}
263 
/*
 * Attach the target-private configuration blob to a table entry.
 * Ownership stays with the target; it is detached again (set to NULL)
 * in dm_table_destroy() after the target's destroy hook ran.
 */
void
dm_table_init_target(dm_table_entry_t *table_en, void *cfg)
{
	table_en->target_config = cfg;
}
269 
/*
 * Record pdev as a dependency of table_en, unless a mapping to the
 * same underlying device (matched by udev) already exists.
 *
 * Returns 0 on success, -1 if the dependency was already present.
 *
 * NOTE(review): the TAILQ insert below happens under a SHARED table
 * lock, which does not serialize two concurrent writers of
 * pdev_maps.  This is presumably safe because table loads are
 * serialized at a higher layer -- confirm before relying on it.
 */
int
dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	KKASSERT(pdev);

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	/* Reject duplicates: one mapping per underlying device. */
	TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
		if (map->data.pdev->udev == pdev->udev) {
			lockmgr(&head->table_mtx, LK_RELEASE);
			return -1;
		}
	}

	map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
	map->data.pdev = pdev;
	TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
296 
/*
 * Free every pdev mapping recorded for table_en, leaving pdev_maps
 * empty.  The mappings only reference pdevs; the pdevs themselves are
 * not released here.
 *
 * Called from dm_table_destroy() while the table lock is already held
 * exclusively; the shared acquisition below then recurses, which is
 * permitted because the lock was initialized with LK_CANRECURSE.
 */
void
dm_table_free_deps(dm_table_entry_t *table_en)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
		TAILQ_REMOVE(&table_en->pdev_maps, map, next);
		kfree(map, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));

	lockmgr(&head->table_mtx, LK_RELEASE);
}
314