xref: /dragonfly/sys/vfs/hammer/hammer_volume.c (revision 0ca59c34)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"

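/*
 * Per-operation big-block accounting.  total_bigblocks and
 * total_free_bigblocks accumulate the change applied to the root volume's
 * statistics; counter counts the free big-blocks below one layer1 entry
 * and is stored in layer1->blocks_free before being reset.
 */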
struct bigblock_stat {
	int64_t total_bigblocks;
	int64_t total_free_bigblocks;
	int64_t counter;
};

static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

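/*
 * Add a volume to a HAMMER filesystem (hammer volume-add).  The new
 * volume's header is formatted and installed, its freemap is initialized,
 * and every volume header is updated with the new volume count and
 * big-block totals.
 */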
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(
		hmp,
		&ondisk,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

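	/*
	 * Format the new volume's freemap and bump the volume count while
	 * holding the sync lock and the blockmap lock.
	 */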
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);
	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find the volume by name.
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		hmkprintf(hmp, "Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		hmkprintf(hmp, "Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock filesystem if the volume is not empty
	 */
	hmp->volume_to_remove = volume->vol_no;

	if (hammer_test_free_freemap(trans, volume)) {
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	}

	/*
	 * Sync filesystem
	 */
	hammer_flush_dirty(hmp, 30);

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's big-block stats
	 */
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		hmkprintf(hmp, "Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}
	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error == -1) {
		hmkprintf(hmp, "Failed to unload volume\n");
		goto end1;
	}

	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);
	hmp->volume_to_remove = -1;

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


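/*
 * List the volumes backing the filesystem (hammer volume-list), copying
 * up to ioc->nvols device names out to userspace.
 */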
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
    struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}
	ioc->nvols = cnt;

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

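/*
 * Reblock the entire filesystem (all PFSs, all big-block levels) so that
 * no in-use data remains on the volume being removed.
 */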
static
int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_mount *hmp = trans->hmp;
	int error;
	int vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	vol_no = trans->hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	hmkprintf(hmp, "reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			hmkprintf(hmp, "reblock was interrupted\n");
		} else {
			hmkprintf(hmp, "reblock failed: %d\n", error);
		}
		return(error);
	}

	return(0);
}

/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	int error = 0;
	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	hammer_off_t aligned_vol_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a big-block (8 MB) boundary.
	 */
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_end_off = (aligned_buf_end_off + HAMMER_BLOCKMAP_LAYER2_MASK)
		& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB big-block.
	 *
	 * For each chunk we need one L1 entry and one L2 big-block.
	 * We use the first big-block of each chunk as the L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_vol_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_BIGBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer2);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer2, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer1);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer1, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}


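/*
 * Initialize a single layer1 or layer2 freemap entry of a volume being
 * added.  Called via hammer_iterate_l1l2_entries().
 */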
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a big-block (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);

	if (layer1) {
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 big-block itself.
			 * (On the root volume the first entry represents the
			 * L1 big-block and the second entry the L2 big-block,
			 * but this function only formats non-root volumes,
			 * since a new root volume cannot be added.)
			 */
			KKASSERT(trans->rootvol && trans->rootvol != volume);
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available big-block
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->total_free_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Big-block outside of physically available
			 * space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		KKASSERT(0);
	}

	return 0;
}

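/*
 * Initialize the freemap of a newly added volume.
 */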
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, format_callback, stat);
}

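/*
 * Free a layer1/layer2 freemap entry of the volume being removed.  Layer1
 * entries are marked unavailable; a layer2 entry that still holds data
 * causes EBUSY to be returned.
 */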
static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	if (layer1) {
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			/*
			 * This layer1 entry is already free.
			 */
			return 0;
		}

		KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
			trans->hmp->volume_to_remove);

		/*
		 * Free the L1 entry
		 */
		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		return 0;
	} else if (layer2) {
		if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
			return 0;
		}

		if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
			return 0;
		}

		if (layer2->append_off == 0 &&
		    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			--stat->total_bigblocks;
			--stat->total_free_bigblocks;
			return 0;
		}

		/*
		 * We found a layer2 entry that is not empty!
		 */
		return EBUSY;
	} else {
		KKASSERT(0);
	}

	return EINVAL;
}

/*
 * Non-zero return value means we can't free the volume.
 */
static int
test_free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	if (layer2 == NULL) {
		return(0);  /* only layer2 needs to be tested */
	}

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		return(0);  /* beyond physically available space */
	}
	if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
		return(0);  /* big-block for layer1/2 */
	}
	if (layer2->append_off == 0 &&
	    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		return(0);  /* big-block is 0% used */
	}

	return(EBUSY);  /* big-block has data */
}

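/*
 * Free the freemap of the volume being removed.  Fails with EBUSY if any
 * big-block on the volume is still in use.
 */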
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	int error;

	error = hammer_test_free_freemap(trans, volume);
	if (error)
		return error;  /* not ready to free */

	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, free_callback, stat);
}

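/*
 * Test whether the volume is empty, i.e. all of its big-blocks are free.
 * Returns 0 if the volume can be removed, non-zero otherwise.
 */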
static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	return hammer_iterate_l1l2_entries(trans, volume, test_free_callback, NULL);
}

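/*
 * Fill in the ondisk volume header for a new volume.  Most fields are
 * copied or derived from the existing root volume's header.
 */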
static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	int64_t vol_alloc;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk; copy the volume-relative
	 * offset from the existing root volume.
	 */
	vol_alloc = hmp->rootvol->ondisk->vol_bot_beg;
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		hmkprintf(hmp, "volume %d %s is too small to hold the volume header\n",
		     ondisk->vol_no, ondisk->vol_name);
		return(EFTYPE);
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;
	return(0);
}

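/*
 * Update vol_count in every volume header and adjust the root volume's
 * big-block statistics after a volume has been added or removed.
 */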
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the root volume's header are automatically
		 * flushed to disk; any other volume we modify must be
		 * flushed here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat->total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Big-block count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;

	/*
	 * Update the total number of free big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks +=
		stat->total_free_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the copy in hmp.
	 */
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;

	return(error);
}