xref: /dragonfly/sys/dev/disk/dm/dm_table.c (revision fbc9049b)
1 /*        $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $      */
2 
3 /*
4  * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
5  * Copyright (c) 2008 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Adam Hamsik.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/malloc.h>
34 #include <cpu/atomic.h>
35 #include <dev/disk/dm/dm.h>
36 
37 /*
38  * There are two types of users of this interface:
39  *
40  * a) Readers such as
41  *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
42  *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
43  *
44  * b) Writers such as
45  *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
46  *
47  * Writers can work with table_head only when there are no readers. We
48  * simply use shared/exclusive locking to ensure this.
49  */
50 
51 /*
52  * Function to increment table user reference counter. Return id
53  * of table_id table.
54  * DM_TABLE_ACTIVE will return active table id.
55  * DM_TABLE_INACTIVE will return inactive table id.
56  */
57 static int
58 dm_table_busy(dm_table_head_t *head, uint8_t table_id)
59 {
60 	uint8_t id;
61 
62 	id = 0;
63 
64 	lockmgr(&head->table_mtx, LK_SHARED);
65 
66 	if (table_id == DM_TABLE_ACTIVE)
67 		id = head->cur_active_table;
68 	else
69 		id = 1 - head->cur_active_table;
70 
71 	atomic_add_int(&head->io_cnt, 1);
72 
73 	return id;
74 }
75 
76 /*
77  * Function release table lock and eventually wakeup all waiters.
78  */
79 static void
80 dm_table_unbusy(dm_table_head_t *head)
81 {
82 	KKASSERT(head->io_cnt != 0);
83 
84 	atomic_subtract_int(&head->io_cnt, 1);
85 
86 	lockmgr(&head->table_mtx, LK_RELEASE);
87 }
88 
89 /*
90  * Return current active table to caller, increment io_cnt reference counter.
91  */
92 dm_table_t *
93 dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
94 {
95 	uint8_t id;
96 
97 	id = dm_table_busy(head, table_id);
98 
99 	return &head->tables[id];
100 }
101 
/*
 * Decrement io reference counter and release shared lock.
 *
 * Counterpart of dm_table_get_entry().  table_id is unused; it is
 * kept so the call site mirrors dm_table_get_entry()'s signature.
 */
void
dm_table_release(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_unbusy(head);
}
110 
111 /*
112  * Switch table from inactive to active mode. Have to wait until io_cnt is 0.
113  */
114 void
115 dm_table_switch_tables(dm_table_head_t *head)
116 {
117 	lockmgr(&head->table_mtx, LK_EXCLUSIVE);
118 
119 	head->cur_active_table = 1 - head->cur_active_table;
120 
121 	lockmgr(&head->table_mtx, LK_RELEASE);
122 }
123 
/*
 * Destroy all table data. This function can run when there are no
 * readers on table lists.
 *
 * Holds the table lock exclusively for the whole teardown.  Note that
 * dm_table_free_deps() below re-acquires this same lock in shared
 * mode while we hold it exclusively; this recursion works because the
 * lock is initialized with LK_CANRECURSE (see dm_table_head_init()).
 *
 * Always returns 0.
 */
int
dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint8_t id;

	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	dmdebug("dm_table_destroy called with %d--%d\n", table_id, head->io_cnt);

	/* Resolve the selector to a slot, same mapping as dm_table_busy(). */
	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	tbl = &head->tables[id];

	/* Unlink and tear down every target entry in the table. */
	while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
		TAILQ_REMOVE(tbl, table_en, next);

		/* Target destroy returning 0 means its config is gone. */
		if (table_en->target->destroy(table_en) == 0)
			table_en->target_config = NULL;

		dm_table_free_deps(table_en);

		/* decrement the refcount for the target */
		dm_target_unbusy(table_en->target);

		kfree(table_en, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(tbl));

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
165 
166 /*
167  * Return length of active or inactive table in device.
168  */
169 static uint64_t
170 _dm_table_size(dm_table_head_t *head, int table)
171 {
172 	dm_table_t *tbl;
173 	dm_table_entry_t *table_en;
174 	uint64_t length;
175 
176 	length = 0;
177 
178 	/* Select active table */
179 	tbl = dm_table_get_entry(head, table);
180 
181 	/*
182 	 * Find out what tables I want to select.
183 	 * if length => rawblkno then we should used that table.
184 	 */
185 	TAILQ_FOREACH(table_en, tbl, next) {
186 		length += table_en->length;
187 	}
188 
189 	dm_table_unbusy(head);
190 
191 	return length;
192 }
193 
/*
 * Return total length of the currently active table.
 */
uint64_t
dm_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_ACTIVE);
}
199 
/*
 * Return total length of the inactive table.
 */
uint64_t
dm_inactive_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_INACTIVE);
}
205 
206 /*
207  * Return > 0 if table is at least one table entry (returns number of entries)
208  * and return 0 if there is not. Target count returned from this function
209  * doesn't need to be true when userspace user receive it (after return
210  * there can be dm_dev_resume_ioctl), therefore this is only informative.
211  */
212 int
213 dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
214 {
215 	dm_table_entry_t *table_en;
216 	dm_table_t *tbl;
217 	uint32_t target_count;
218 
219 	target_count = 0;
220 
221 	tbl = dm_table_get_entry(head, table_id);
222 
223 	TAILQ_FOREACH(table_en, tbl, next)
224 	    target_count++;
225 
226 	dm_table_unbusy(head);
227 
228 	return target_count;
229 }
230 
231 /*
232  * Initialize dm_table_head_t structures, I'm trying to keep this structure as
233  * opaque as possible.
234  */
235 void
236 dm_table_head_init(dm_table_head_t *head)
237 {
238 	head->cur_active_table = 0;
239 	head->io_cnt = 0;
240 
241 	/* Initialize tables. */
242 	TAILQ_INIT(&head->tables[0]);
243 	TAILQ_INIT(&head->tables[1]);
244 
245 	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
246 }
247 
/*
 * Destroy all variables in table_head.
 *
 * The caller must guarantee the lock is idle (no holders) and that
 * the tables were already destroyed, hence io_cnt must be zero.
 */
void
dm_table_head_destroy(dm_table_head_t *head)
{
	KKASSERT(lockcount(&head->table_mtx) == 0);

	/* tables don't exist when I call this routine, therefore it
	 * doesn't make sense to have io_cnt != 0 */
	KKASSERT(head->io_cnt == 0);

	lockuninit(&head->table_mtx);
}
262 
/*
 * Record the target type on the owning device and attach the
 * target-private configuration to this table entry.
 */
void
dm_table_init_target(dm_table_entry_t *table_en, uint32_t type, void *cfg)
{
	table_en->dev->dev_type = type;
	table_en->target_config = cfg;
}
269 
/*
 * Record pdev as a dependency of this table entry, unless it is
 * already on the entry's pdev_maps list.
 *
 * Returns 0 on success, -1 if the pdev (matched by udev) was already
 * registered.
 *
 * NOTE(review): the pdev_maps list is mutated while holding the table
 * lock only in SHARED mode; two concurrent callers on the same entry
 * could race on TAILQ_INSERT_TAIL unless table loads are serialized
 * at a higher level — confirm against the ioctl paths.
 */
int
dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	KKASSERT(pdev);

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	/* Reject duplicates, identified by the pdev's udev number. */
	TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
		if (map->data.pdev->udev == pdev->udev) {
			lockmgr(&head->table_mtx, LK_RELEASE);
			return -1;
		}
	}

	map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
	map->data.pdev = pdev;
	TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
296 
/*
 * Free every dependency mapping attached to this table entry.
 *
 * Called from dm_table_destroy() while the table lock is already held
 * exclusively; the shared re-acquisition below relies on the lock
 * being initialized with LK_CANRECURSE (see dm_table_head_init()).
 */
void
dm_table_free_deps(dm_table_entry_t *table_en)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
		TAILQ_REMOVE(&table_en->pdev_maps, map, next);
		kfree(map, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));

	lockmgr(&head->table_mtx, LK_RELEASE);
}
314