xref: /dragonfly/sys/vfs/hammer/hammer_volume.c (revision 279dd846)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/fcntl.h>
#include <sys/nlookup.h>

#include "hammer.h"

struct bigblock_stat {
	int64_t total_bigblocks;
	int64_t total_free_bigblocks;
	int64_t counter;
};

static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

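/*
 * Add a volume to a mounted HAMMER filesystem (typically reached via the
 * hammer(8) volume-add command).  The new volume gets a freshly formatted
 * volume header and freemap, and the root volume's big-block statistics
 * are updated to account for the added space.
 *
 * Illustrative userland sketch only (the field names below are the ones
 * this function consumes; the fd/size variables are hypothetical and the
 * exact ioctl plumbing lives in hammer_ioctl.h and hammer(8)):
 *
 *	struct hammer_ioc_volume ioc;
 *
 *	bzero(&ioc, sizeof(ioc));
 *	snprintf(ioc.device_name, sizeof(ioc.device_name), "/dev/da1");
 *	ioc.vol_size = device_size;		(hypothetical)
 *	ioc.boot_area_size = boot_size;		(hypothetical)
 *	ioc.mem_area_size = mem_size;		(hypothetical)
 *	ioctl(fs_fd, HAMMERIOC_ADD_VOLUME, &ioc);
 */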
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(
		hmp,
		&ondisk,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);
	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk *ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot delete volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find the volume by the device name supplied by userland.
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock filesystem if the volume is not empty
	 */
	hmp->volume_to_remove = volume->vol_no;

	if (hammer_test_free_freemap(trans, volume)) {
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	}

	/*
	 * Sync filesystem
	 */
	hammer_flush_dirty(hmp, 30);

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's big-block stats
	 */
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		kprintf("Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}

	hmp->volume_to_remove = -1;
	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		goto end1;
	}

	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}


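/*
 * Copy the device names of the volumes that make up the filesystem out to
 * userland, one entry per volume (up to the ioc->nvols slots supplied), and
 * return the number of entries actually copied in ioc->nvols.
 */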
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
    struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}
	ioc->nvols = cnt;

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

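/*
 * Reblock the entire filesystem (all PFSs, all big-blocks) so that no data
 * remains on the volume marked for removal (hmp->volume_to_remove).
 */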
static
int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	int error;
	int vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	vol_no = trans->hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	kprintf("reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		return(error);
	}

	return(0);
}

/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	int error = 0;
	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	hammer_off_t aligned_vol_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a big-block (8 MB) boundary.
	 */
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_end_off = (aligned_buf_end_off + HAMMER_BLOCKMAP_LAYER2_MASK)
		& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB big-block.
	 *
	 * For each chunk we need one L1 entry and one L2 big-block.
	 * We use the first big-block of each chunk as L2 block.
	 */
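	/*
	 * Note on ordering: for each 4 TB chunk the inner loop below visits
	 * all of the chunk's layer2 entries first and the outer loop then
	 * visits the chunk's layer1 entry.  format_callback() relies on this
	 * order to accumulate the number of free big-blocks in stat->counter
	 * before it writes that count into layer1->blocks_free.
	 */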
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_vol_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_BIGBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer2);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer2, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer1);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer1, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}


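/*
 * Iteration callback used when formatting the freemap of a newly added
 * volume: it initializes one layer1 or layer2 entry per call and counts
 * the formatted and free big-blocks in the bigblock_stat passed in as the
 * opaque data argument.
 */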
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a big-block (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);

	if (layer1) {
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 big-block itself.
			 * (On the root volume the first entry represents the
			 * L1 big-block and the second the L2 big-block, but
			 * this function only deals with non-root volumes
			 * since a new root volume cannot be added.)
			 */
			KKASSERT(trans->rootvol && trans->rootvol != volume);
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available big-block
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->total_free_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Big-block outside of physically available
			 * space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		KKASSERT(0);
	}

	return 0;
}

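/*
 * Format the freemap of a newly added, empty volume and report the
 * resulting big-block counts in *stat.
 */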
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, format_callback, stat);
}

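/*
 * Iteration callback used when removing a volume: it releases the volume's
 * layer1 and layer2 freemap entries, updating *stat as it goes, and returns
 * EBUSY as soon as it hits a big-block that still holds data.
 */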
static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	if (layer1) {
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			/*
			 * This layer1 entry is already free.
			 */
			return 0;
		}

		KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
			trans->hmp->volume_to_remove);

		/*
		 * Free the L1 entry
		 */
		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		return 0;
	} else if (layer2) {
		if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
			return 0;
		}

		if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
			return 0;
		}

		if (layer2->append_off == 0 &&
		    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			--stat->total_bigblocks;
			--stat->total_free_bigblocks;
			return 0;
		}

		/*
		 * We found a layer2 entry that is not empty!
		 */
		return EBUSY;
	} else {
		KKASSERT(0);
	}

	return EINVAL;
}

/*
 * Non-zero return value means we can't free the volume.
 */
static int
test_free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	if (layer2 == NULL) {
		return(0);  /* only layer2 needs to be tested */
	}

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		return(0);  /* beyond physically available space */
	}
	if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
		return(0);  /* big-block for layer1/2 */
	}
	if (layer2->append_off == 0 &&
	    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		return(0);  /* big-block is 0% used */
	}

	return(EBUSY);  /* big-block has data */
}

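/*
 * Release the freemap of the volume being removed and report the resulting
 * big-block counts in *stat.  The freemap must already be empty; EBUSY is
 * returned if any big-block is still in use.
 */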
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	int error;

	error = hammer_test_free_freemap(trans, volume);
	if (error)
		return error;  /* not ready to free */

	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, free_callback, stat);
}

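/*
 * Check whether the volume's freemap is completely unused.  Returns 0 if
 * the volume can be freed and EBUSY if any big-block still holds data.
 */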
static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	return hammer_iterate_l1l2_entries(trans, volume, test_free_callback, NULL);
}

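/*
 * Initialize the in-memory volume header for a volume that is about to be
 * added.  The resulting on-disk layout, derived from the assignments below,
 * is roughly:
 *
 *	0 .............. vol_bot_beg	volume header + reserved header junk
 *	vol_bot_beg .... vol_mem_beg	boot area (boot_area_size)
 *	vol_mem_beg .... vol_buf_beg	memory area (mem_area_size)
 *	vol_buf_beg .... vol_buf_end	zone 2 (raw buffer) allocation area
 *
 * vol_bot_beg is copied from the existing root volume and vol_buf_end is
 * the volume size rounded down to a HAMMER_BUFSIZE boundary.
 */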
static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	int64_t vol_alloc;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk, copy volume relative
	 * offset from the existing root volume.
	 */
	vol_alloc = hmp->rootvol->ondisk->vol_bot_beg;
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		kprintf("volume %d %s is too small to hold the volume header\n",
		     ondisk->vol_no, ondisk->vol_name);
		return(EFTYPE);
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;
	return(0);
}

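/*
 * After a volume has been added or removed, bring every volume header and
 * the root volume's aggregate big-block statistics back in sync, and update
 * the mount's block counts accordingly.
 */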
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat->total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Big-block count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;

	/*
	 * Update the total number of free big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks +=
		stat->total_free_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the copy in hmp.
	 */
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;

	return(error);
}