/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.25 2008/08/21 23:28:43 thomas Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int	RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}
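
/*
 * Illustrative example, assuming HAMMER_BUFSIZE is 16KB and
 * HAMMER_BUFLISTS is 64 (HAMMER_BUFLISTMASK == 63): buffers 16KB apart
 * hash to consecutive lists and the index wraps every 1MB:
 *
 *	buffer_hash(0x00000000) == 0
 *	buffer_hash(0x00004000) == 1
 *	buffer_hash(0x00100000) == 0
 */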

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume: %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
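
/*
 * Usage sketch (hypothetical call sites; the flags and device paths are
 * only examples): opening an existing volume replaces the vol_no argument
 * with the on-disk value, while a new volume starts from a zeroed header.
 */
#if 0
static void
example_setup_volumes(void)
{
	struct volume_info *vol;

	/* existing volume: vol_no argument is replaced by the on-disk value */
	vol = setup_volume(-1, "/dev/ad0s1a", 0, O_RDONLY);

	/* brand new volume 0: header is zeroed and marked modified */
	vol = setup_volume(0, "/dev/ad1s1a", 1, O_RDWR);
}
#endif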

struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer %016llx %016llx\n",
				orig_offset, buf_offset);
		}
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
					(buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew == 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				       "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}

void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
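
/*
 * The buffer-reuse test in get_buffer_data() relies on the identity that
 * (a ^ b) & ~HAMMER_BUFMASK64 is zero iff offsets a and b fall within the
 * same HAMMER_BUFSIZE-aligned buffer.  A minimal sketch of the predicate:
 */
#if 0
static __inline int
same_hammer_buffer(hammer_off_t a, hammer_off_t b)
{
	/* any differing bit above the buffer mask means different buffers */
	return (((a ^ b) & ~HAMMER_BUFMASK64) == 0);
}
#endif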

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is released if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
			(int32_t)(node_offset & HAMMER_BUFMASK)));
}

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}
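
/*
 * Usage sketch (hypothetical): a 128-byte record lands in the small-data
 * zone, is returned zeroed with *offp set to its blockmap offset, and the
 * tracking buffer must be released by the caller.
 */
#if 0
static void
example_alloc_data(void)
{
	struct buffer_info *data_buffer = NULL;
	hammer_off_t data_off;
	void *data;

	data = alloc_data_element(&data_off, 128, &data_buffer);
	memset(data, 0, 128);		/* already zeroed; shown for clarity */
	rel_buffer(data_buffer);
}
#endif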

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		layer1->blocks_free = 0;
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
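
/*
 * For reference, a sketch of the two-level dive the loops above perform,
 * expressed as a standalone helper (illustrative only; not used by the
 * utility): map a raw-buffer offset to its freemap layer2 entry.
 */
#if 0
static struct hammer_blockmap_layer2 *
dive_freemap_layer2(hammer_off_t layer1_base, hammer_off_t phys_offset,
		    struct buffer_info **buf1p, struct buffer_info **buf2p)
{
	struct hammer_blockmap_layer1 *layer1;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;

	layer1_offset = layer1_base +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
	layer1 = get_buffer_data(layer1_offset, buf1p, 0);
	assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);
	return(get_buffer_data(layer2_offset, buf2p, 0));
}
#endif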

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	int n;
	int limit_index;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 100MB.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 100*1024*1024)
			undo_limit = 100*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}
}
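
/*
 * Sizing example (illustrative, assuming HAMMER_LARGEBLOCK_SIZE is 8MB):
 * on a 50GB volume the 0.1% rule yields ~50MB, which is below the 100MB
 * floor, so undo_limit becomes 100MB; rounding up to an 8MB multiple
 * gives 104MB, i.e. limit_index = 13 large blocks in the undo array.
 */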

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;

again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
				        ~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset = (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
					~HAMMER_LARGEBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			      HAMMER_LARGEBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}
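
/*
 * The boundary test at the top of alloc_blockmap() uses the same XOR
 * identity as the buffer cache: an allocation crosses a buffer edge iff
 * its first and last byte offsets differ above HAMMER_BUFMASK64.
 * Illustrative case, assuming 16KB buffers: next_offset = 0x3ff0 with
 * bytes = 32 spans the 0x4000 boundary, so next_offset is advanced to
 * (0x3ff0 + 32) & ~HAMMER_BUFMASK64 = 0x4000 before allocating.
 */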

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}