/*        $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $      */

/*
 * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Hamsik.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>

#include <sys/malloc.h>

#include <dev/disk/dm/dm.h>

/*
 * There are two types of users of this interface:
 *
 * a) Readers such as
 *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
 *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
 *
 * b) Writers such as
 *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
 *
 * Writers can work with table_head only when there are no readers. We
 * simply use shared/exclusive locking to ensure this.
 */

/*
 * Increment the table user reference counter and return the index of
 * the requested table.
 * DM_TABLE_ACTIVE returns the index of the active table.
 * DM_TABLE_INACTIVE returns the index of the inactive table.
 */
static int
dm_table_busy(dm_table_head_t *head, uint8_t table_id)
{
	uint8_t id;

	id = 0;

	lockmgr(&head->table_mtx, LK_SHARED);

	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	atomic_add_int(&head->io_cnt, 1);

	return id;
}
/*
 * Release the table lock, waking up any waiters.
 */
static void
dm_table_unbusy(dm_table_head_t *head)
{
	KKASSERT(head->io_cnt != 0);

	atomic_subtract_int(&head->io_cnt, 1);

	lockmgr(&head->table_mtx, LK_RELEASE);
}
/*
 * Return the requested (active or inactive) table to the caller and
 * increment the io_cnt reference counter.
 */
dm_table_t *
dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
{
	uint8_t id;

	id = dm_table_busy(head, table_id);

	return &head->tables[id];
}
/*
 * Decrement io reference counter and release shared lock.
 */
void
dm_table_release(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_unbusy(head);
}
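
/*
 * Usage sketch (illustrative only, not part of any call path): readers
 * pair dm_table_get_entry() with dm_table_release() and only touch the
 * returned list in between, which is roughly what readers such as
 * dmsize and dmstrategy do.  The function name below is hypothetical.
 */
static uint64_t __unused
dm_table_reader_example(dm_table_head_t *head)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint64_t total = 0;

	/* Takes the shared lock and bumps io_cnt via dm_table_busy(). */
	tbl = dm_table_get_entry(head, DM_TABLE_ACTIVE);

	TAILQ_FOREACH(table_en, tbl, next)
		total += table_en->length;

	/* Drops io_cnt and the shared lock again. */
	dm_table_release(head, DM_TABLE_ACTIVE);

	return total;
}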
/*
 * Switch the inactive table to active. Taking the exclusive lock waits
 * until all readers have released the tables.
 */
void
dm_table_switch_tables(dm_table_head_t *head)
{
	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	head->cur_active_table = 1 - head->cur_active_table;

	lockmgr(&head->table_mtx, LK_RELEASE);
}
/*
 * Destroy all data in the selected table. This function may only run
 * when there are no readers on the table lists.
 */
int
dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint8_t id;

	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	aprint_debug("dm_table_destroy called with %d--%d\n", table_id, head->io_cnt);

	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	tbl = &head->tables[id];

	while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
		TAILQ_REMOVE(tbl, table_en, next);
		/*
		 * Remove target-specific config data. After a successful
		 * call, table_en->target_config must be set to NULL.
		 */
		table_en->target->destroy(table_en);

		dm_table_free_deps(table_en);

		/* decrement the refcount for the target */
		dm_target_unbusy(table_en->target);

		kfree(table_en, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(tbl));

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
/*
 * Return length of active or inactive table in device.
 */
static uint64_t
_dm_table_size(dm_table_head_t *head, int table)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint64_t length;

	length = 0;

	/* Select the requested table (active or inactive). */
	tbl = dm_table_get_entry(head, table);

	/*
	 * Sum the lengths of all entries to get the total table size.
	 */
	TAILQ_FOREACH(table_en, tbl, next) {
		length += table_en->length;
	}

	dm_table_unbusy(head);

	return length;
}

uint64_t
dm_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_ACTIVE);
}

uint64_t
dm_inactive_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_INACTIVE);
}

/*
 * Return the number of entries in the table, or 0 if it is empty. The
 * count may already be stale by the time userspace receives it (a
 * dm_dev_resume_ioctl can run after we return), so it is informative
 * only.
 */
int
dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_entry_t *table_en;
	dm_table_t *tbl;
	uint32_t target_count;

	target_count = 0;

	tbl = dm_table_get_entry(head, table_id);

	TAILQ_FOREACH(table_en, tbl, next)
	    target_count++;

	dm_table_unbusy(head);

	return target_count;
}


/*
 * Initialize the table_head structure; it is kept as opaque to the
 * rest of the driver as possible.
 */
void
dm_table_head_init(dm_table_head_t *head)
{
	head->cur_active_table = 0;
	head->io_cnt = 0;

	/* Initialize tables. */
	TAILQ_INIT(&head->tables[0]);
	TAILQ_INIT(&head->tables[1]);

	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
}
/*
 * Destroy all resources held in table_head.
 */
void
dm_table_head_destroy(dm_table_head_t *head)
{
	KKASSERT(lockcount(&head->table_mtx) == 0);

	/*
	 * The tables no longer exist by the time this routine is called,
	 * so io_cnt must already be 0.
	 */
	KKASSERT(head->io_cnt == 0);

	lockuninit(&head->table_mtx);
}

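/*
 * Lifecycle sketch (illustrative only, never called): a dm device embeds
 * a dm_table_head_t, initializes it once and, on teardown, empties both
 * tables with dm_table_destroy() before dm_table_head_destroy() runs.
 * The function name below is hypothetical.
 */
static void __unused
dm_table_head_lifecycle_example(dm_table_head_t *head)
{
	/* Set up the two empty tables and the shared/exclusive lock. */
	dm_table_head_init(head);

	/* ... targets are loaded, tables switched, I/O runs ... */

	/* Drop whatever both tables still hold, then free the lock. */
	dm_table_destroy(head, DM_TABLE_ACTIVE);
	dm_table_destroy(head, DM_TABLE_INACTIVE);
	dm_table_head_destroy(head);
}
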
void
dm_table_init_target(dm_table_entry_t *table_en, uint32_t type, void *cfg)
{
	table_en->dev->dev_type = type;
	table_en->target_config = cfg;
}

int
dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	KKASSERT(pdev);

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
		if (map->data.pdev->udev == pdev->udev) {
			lockmgr(&head->table_mtx, LK_RELEASE);
			return -1;
		}
	}

	map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
	map->data.pdev = pdev;
	TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}

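/*
 * Sketch of how a target init routine is expected to use the helpers
 * above (illustrative only; struct dm_example_config and the function
 * are hypothetical and unused).  A real target allocates its private
 * config, publishes it with dm_table_init_target() and registers each
 * physical device it maps with dm_table_add_deps().
 */
struct dm_example_config {
	dm_pdev_t	*pdev;
	uint64_t	offset;
};

static int __unused
dm_target_init_example(dm_table_entry_t *table_en, dm_pdev_t *pdev,
    uint32_t dev_type, uint64_t offset)
{
	struct dm_example_config *cfg;

	cfg = kmalloc(sizeof(*cfg), M_DM, M_WAITOK | M_ZERO);
	cfg->pdev = pdev;
	cfg->offset = offset;

	/* Record the config pointer and the pdev dependency. */
	dm_table_init_target(table_en, dev_type, cfg);
	dm_table_add_deps(table_en, pdev);

	return 0;
}
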
void
dm_table_free_deps(dm_table_entry_t *table_en)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
		TAILQ_REMOVE(&table_en->pdev_maps, map, next);
		kfree(map, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));

	lockmgr(&head->table_mtx, LK_RELEASE);
}