/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
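/*
 * Compare two segments for AVL ordering.  Any two overlapping segments
 * compare equal, so avl_find() on a (start, end) search range returns a
 * segment that overlaps that range, not just an exact match.
 */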
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

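/*
 * Initialize an in-core space map covering the range
 * [start, start + size); all offsets and sizes passed to the map must
 * be multiples of 1 << shift.
 */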
void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
	kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY0(sm->sm_space);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

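/*
 * Add the segment [start, start + size) to the map, merging it with
 * adjacent segments whenever possible.
 */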
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_free(ss_before, sizeof (*ss_before));
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

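/*
 * Remove the segment [start, start + size) from the map, splitting an
 * existing segment in two when the range lies strictly inside it.
 */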
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

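/*
 * Return B_TRUE if the entire range [start, start + size) lies within
 * a single segment of the map.
 */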
boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

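/*
 * Empty the map, invoking func (if non-NULL) on each segment before it
 * is freed; mdest is passed through to func, e.g. a destination map
 * when func is space_map_add.
 */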
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_free(ss, sizeof (*ss));
	}
	sm->sm_space = 0;
}

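/*
 * Invoke func on each segment of the map without modifying the map.
 */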
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
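/*
 * On disk, the map is a stream of 64-bit entries: each one is either a
 * debug record (identified by SM_DEBUG_DECODE) or describes a run of
 * space as an (offset, run length, alloc-or-free type) triple, with
 * offset and length scaled by 1 << sm_shift; see the SM_*_DECODE
 * macros in sys/space_map.h.
 */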
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY0(sm->sm_space);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu  offset=%llx  size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))		/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

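/*
 * Discard all in-core segments and detach the pluggable ops vector.
 */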
void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

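/*
 * The entry points below delegate allocation policy to the pluggable
 * smop vector while keeping the in-core map consistent: a successful
 * alloc or claim removes the range from the map, and a free adds it
 * back.
 */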
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
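/*
 * Each in-core segment becomes one or more on-disk entries: runs longer
 * than SM_RUN_MAX are split, and the entry buffer is flushed with
 * dmu_write() whenever it fills up.
 */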
void
space_map_sync(space_map_t *sm, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	VERIFY0(sm->sm_space);
}

void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers.  Every integer is either
 * in the map, or it's not.  A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps.  For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform.  Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
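/*
 * For example, adding segment [10, 20) with refcnt +1 and segment
 * [15, 30) with refcnt +1 produces the tree {10: +1, 15: +1, 20: -1,
 * 30: -1}.  Sweeping it in offset order with a running reference count,
 * minref = 2 yields [15, 20) (the intersection) and minref = 1 yields
 * [10, 30) (the union).
 */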
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

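/*
 * Record the segment [start, end) as a +refcnt delta at start and a
 * -refcnt delta at end, so that a walk of the tree in offset order sees
 * the reference count rise at start and fall back at end.
 */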
void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
	int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map.  The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
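/*
 * This is a single sweep in offset order with a running reference
 * count: a segment opens when the count first reaches minref and closes
 * when it drops back below.
 */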
593 space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
594 {
595 	uint64_t start = -1ULL;
596 	int64_t refcnt = 0;
597 	space_ref_t *sr;
598 
599 	ASSERT(MUTEX_HELD(sm->sm_lock));
600 
601 	space_map_vacate(sm, NULL, NULL);
602 
603 	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
604 		refcnt += sr->sr_refcnt;
605 		if (refcnt >= minref) {
606 			if (start == -1ULL) {
607 				start = sr->sr_offset;
608 			}
609 		} else {
610 			if (start != -1ULL) {
611 				uint64_t end = sr->sr_offset;
612 				ASSERT(start <= end);
613 				if (end > start)
614 					space_map_add(sm, start, end - start);
615 				start = -1ULL;
616 			}
617 		}
618 	}
619 	ASSERT(refcnt == 0);
620 	ASSERT(start == -1ULL);
621 }
622