xref: /dragonfly/sys/vfs/hammer/hammer_volume.c (revision 277350a0)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"
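
/*
 * ioctl support to add, delete, and list the volumes of a mounted
 * HAMMER filesystem.
 */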

static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	struct hammer_volume_ondisk *ondisk,
	int vol_no);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks);

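/*
 * Add a volume.
 */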
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

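	/*
	 * Format the new volume's header in-memory, then install (open)
	 * the new device.
	 */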
	error = hammer_format_volume_header(hmp, ioc, &ondisk, free_vol_no);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

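	/*
	 * Format the freemap for the new volume and account for its
	 * big-blocks in the volume headers.  The blockmap is modified
	 * under the sync and blkmap locks.
	 */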
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume);
	KKASSERT(error == 0);

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);
	KKASSERT(total_bigblocks == empty_bigblocks);

	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			total_bigblocks, empty_bigblocks);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot delete volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	if (hmp->nvolumes <= 1) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "No HAMMER volume to delete\n");
		return (EINVAL);
	}

	/*
	 * Find the volume by name.
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		hmkprintf(hmp, "Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		hmkprintf(hmp, "Cannot remove root volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock the filesystem if the volume is not empty.
	 */
	hmp->volume_to_remove = volume->vol_no;

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	if (total_bigblocks == empty_bigblocks) {
		hmkprintf(hmp, "%s is already empty\n", volume->vol_name);
	} else if (ioc->flag & HAMMER_IOC_VOLUME_REBLOCK) {
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	} else {
		hmkprintf(hmp, "%s is not empty\n", volume->vol_name);
		hammer_rel_volume(volume, 0);
		error = ENOTEMPTY;
		goto end;
	}

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	error = hammer_free_freemap(trans, volume);
	if (error) {
		hmkprintf(hmp, "Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}
	hammer_rel_volume(volume, 0);

	/*
	 * XXX: Temporary solution for
	 * http://lists.dragonflybsd.org/pipermail/kernel/2015-August/175027.html
	 */
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);
	hammer_flusher_sync(hmp); /* 1 */
	hammer_flusher_sync(hmp); /* 2 */
	hammer_flusher_sync(hmp); /* 3 */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * Unload buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

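	/*
	 * Unload the volume, handing it a zeroed header so the volume
	 * header is erased on-disk.
	 */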
	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error == -1) {
		hmkprintf(hmp, "Failed to unload volume\n");
		goto end1;
	}

	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			-total_bigblocks, -empty_bigblocks);
	KKASSERT(error == 0);
	hmp->volume_to_remove = -1;

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


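/*
 * List volumes (copy out volume number and device name pairs).
 */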
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		ioc->vols[cnt].vol_no = volume->vol_no;
		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}
	ioc->nvols = cnt;

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

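/*
 * Reblock the filesystem to move data off the volume being removed.
 */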
static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_mount *hmp = trans->hmp;
	struct hammer_ioc_reblock reblock;
	int error;
	int vol_no;

	bzero(&reblock, sizeof(reblock));

	vol_no = hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	hmkprintf(hmp, "reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			hmkprintf(hmp, "reblock was interrupted\n");
		} else {
			hmkprintf(hmp, "reblock failed: %d\n", error);
		}
		return(error);
	}

	return(0);
}

/*
 * XXX This somehow needs to stop doing hammer_modify_buffer() for
 * layer2 entries.  In theory adding a large block device could
 * blow away the UNDO FIFO.  The best way is to format layer2 entries
 * in userspace without UNDO getting involved, before the device is
 * safely added to the filesystem.  HAMMER has no interest in what
 * happened to the device before it safely joins the filesystem.
 */
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	struct hammer_mount *hmp = trans->hmp;
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t alloc_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int64_t layer1_count = 0;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	alloc_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);

	hmkprintf(hmp, "Initialize freemap volume %d\n", volume->vol_no);

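	/*
	 * Pass 1: allocate big-blocks from the beginning of the new
	 * volume to back the layer2 array of each layer1 entry covering
	 * this volume that is still marked unavailable.
	 */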
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
			bzero(layer1, sizeof(*layer1));
			layer1->phys_offset = alloc_offset;
			layer1->blocks_free = 0;
			layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			alloc_offset += HAMMER_BIGBLOCK_SIZE;
		}
	}

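	/*
	 * Pass 2: initialize the layer2 entries.  Big-blocks consumed
	 * by the layer2 arrays above belong to the freemap zone,
	 * big-blocks within the usable area are marked free, and the
	 * alignment pad past vol_free_end is marked unavailable.
	 * Each layer1 entry's blocks_free is updated to match.
	 */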
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_count = 0;
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
			bzero(layer2, sizeof(*layer2));

			if (phys_offset + block_offset < alloc_offset) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			} else if (phys_offset + block_offset < vol_free_end) {
				layer2->zone = 0;
				layer2->append_off = 0;
				layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
				++layer1_count;
			} else {
				layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}

			layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
			hammer_modify_buffer_done(buffer2);
		}

		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		layer1->blocks_free += layer1_count;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}

/*
 * XXX This somehow needs to stop doing hammer_modify_buffer() for
 * layer2 entries.  In theory removing a large block device could
 * blow away the UNDO FIFO.  The best way is to erase layer2 entries
 * in userspace without UNDO getting involved, after the device has
 * been safely removed from the filesystem.  HAMMER has no interest
 * in what happens to the device once it's safely removed.
 */
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	struct hammer_mount *hmp = trans->hmp;
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	hmkprintf(hmp, "Free freemap volume %d\n", volume->vol_no);

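	/*
	 * Pass 1: verify that no big-block on this volume is still in
	 * use (anything outside the freemap and unavail zones must be
	 * completely free) before modifying anything.
	 */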
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			case HAMMER_ZONE_FREEMAP_INDEX:
			case HAMMER_ZONE_UNAVAIL_INDEX:
				continue;
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				if (layer2->append_off == 0 &&
				    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
					continue;
				break;
			}
			/* Not empty; release held buffers via end: */
			error = EBUSY;
			goto end;
		}
	}

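	/*
	 * Pass 2: zero out the layer2 entries and mark each layer1
	 * entry covering this volume as unavailable again.
	 */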
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
				bzero(layer2, sizeof(*layer2));
				hammer_modify_buffer_done(buffer2);
				break;
			}
		}

		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}

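/*
 * Format the header of the new volume, copying invariant fields over
 * from the root volume's header.
 */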
static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	struct hammer_volume_ondisk *ondisk,
	int vol_no)
{
	struct hammer_volume_ondisk *root_ondisk;
	int64_t vol_alloc;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

	/*
	 * Just copy from the root volume header.
	 */
	root_ondisk = hmp->rootvol->ondisk;
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ondisk->vol_fsid = root_ondisk->vol_fsid;
	ondisk->vol_fstype = root_ondisk->vol_fstype;
	ksnprintf(ondisk->vol_label, sizeof(ondisk->vol_label), "%s",
		root_ondisk->vol_label);
	ondisk->vol_version = root_ondisk->vol_version;
	ondisk->vol_rootvol = root_ondisk->vol_no;
	ondisk->vol_signature = root_ondisk->vol_signature;

	KKASSERT(ondisk->vol_rootvol == HAMMER_ROOT_VOLNO);
	KKASSERT(ondisk->vol_signature == HAMMER_FSBUF_VOLUME);

	/*
	 * Assign the new vol_no and vol_count.
	 */
	ondisk->vol_no = vol_no;
	ondisk->vol_count = root_ondisk->vol_count + 1;

	/*
	 * Reserve space for (future) header junk.
	 */
	vol_alloc = root_ondisk->vol_bot_beg;
	KKASSERT(vol_alloc == HAMMER_VOL_ALLOC);
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += ioc->boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += ioc->mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = ioc->vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		hmkprintf(hmp, "volume %d is too small to hold the volume header\n",
			ondisk->vol_no);
		return(EFTYPE);
	}

	return(0);
}

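/*
 * Update vol_count in every volume header and adjust the root volume's
 * big-block statistics by the given deltas (negative on deletion).
 */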
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the root volume's header are
		 * automatically flushed to disk; any other volume
		 * we modify must be flushed here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * The big-block count changed, so recompute the total number
	 * of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;

	/*
	 * Update the total number of free big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks += empty_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the copy in hmp.
	 */
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;

	return(error);
}

/*
 * Count total big-blocks and empty big-blocks within the volume.
 * The volume must be a non-root volume.
 *
 * Note that total big-blocks doesn't include the big-blocks used by
 * layer2 (nor, obviously, layer1 and the undomap).  The volume header
 * requires this information and this function retrieves it.
 */
static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks)
{
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int64_t total = 0;
	int64_t empty = 0;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			case HAMMER_ZONE_FREEMAP_INDEX:
			case HAMMER_ZONE_UNAVAIL_INDEX:
				continue;
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				total++;
				if (layer2->append_off == 0 &&
				    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
					empty++;
				break;
			}
		}
	}

	hmkprintf(hmp, "big-blocks total=%jd empty=%jd\n",
		(intmax_t)total, (intmax_t)empty);
	*total_bigblocks = total;
	*empty_bigblocks = empty;
end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}
827