xref: /dragonfly/sbin/hammer/ondisk.c (revision b0d289c2)
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
			struct buffer_info **bufferp, int isnew);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int	RootVolNo = -1;
int	UseReadBehind = -4;
int	UseReadAhead = 4;
int	AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

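/*
 * Hash a buffer offset into one of the per-volume buffer lists.  All
 * byte offsets falling within the same HAMMER_BUFSIZE buffer map to
 * the same bucket.
 */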
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
	int hi;
	struct buffer_info *buf;

	hi = buffer_hash(buf_offset);
	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
		if (buf->buf_offset == buf_offset)
			return(buf);
	return(NULL);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew > 0) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume: %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}

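/*
 * Illustrative call pattern (a sketch, not taken verbatim from any
 * caller; the device path is hypothetical):
 *
 *	struct volume_info *vol;
 *
 *	vol = setup_volume(-1, "/dev/ad0s1a", 0, O_RDONLY);
 *	...access the volume via get_buffer_data() and friends...
 *
 * With isnew == 0 the vol_no argument is ignored and replaced by the
 * volume number recorded in the on-disk header.
 */

/*
 * Look up a volume by number.  Returns NULL when the volume does not
 * exist; get_volume() below exits instead.
 */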
struct volume_info *
test_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		return(NULL);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	if (volume == NULL)
		return;
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}

/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;
	int dora = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
	}
	if (buf_offset == HAMMER_OFF_BAD)
		return(NULL);

	if (AssertOnFailure) {
		assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		       HAMMER_ZONE_RAW_BUFFER);
	}
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = test_volume(vol_no);
	if (volume == NULL) {
		if (AssertOnFailure)
			errx(1, "get_buffer: Volume %d not found!", vol_no);
		return(NULL);
	}

	buf_offset &= ~HAMMER_BUFMASK64;
	buf = find_buffer(volume, buf_offset);

	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
				(long long)orig_offset, (long long)buf_offset,
				buf);
		}
		buf->buf_offset = buf_offset;
		buf->raw_offset = volume->ondisk->vol_buf_beg +
				  (buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		hi = buffer_hash(buf_offset);
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
		dora = (isnew == 0);
	} else {
		if (DebugOpt) {
			fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
				(long long)orig_offset, (long long)buf_offset,
				buf);
		}
		hammer_cache_used(&buf->cache);
		++buf->use_count;
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->raw_offset);
			if (n != HAMMER_BUFSIZE) {
				if (AssertOnFailure)
					err(1, "get_buffer: %s:%016llx "
					    "Read failed at offset %016llx",
					    volume->name,
					    (long long)buf->buf_offset,
					    (long long)buf->raw_offset);
				bzero(ondisk, HAMMER_BUFSIZE);
			}
		}
	}
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (dora)
		get_buffer_readahead(buf);
	return(buf);
}

static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

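	/*
	 * With the default UseReadBehind/UseReadAhead window of -4/+4
	 * this touches the four buffers preceding and the three buffers
	 * following the base buffer; ri == 0, the base buffer itself,
	 * is skipped inside the loop.
	 */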
	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
			raw_offset - vol->ondisk->vol_buf_beg);
		buf = find_buffer(vol, buf_offset);
		if (buf == NULL) {
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}

void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	if (buffer == NULL)
		return;
	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The buffer
 * referenced by *bufferp is released if isnew is set or if the offset
 * falls outside the currently cached buffer, in which case a new
 * referenced buffer is loaded in its place.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	if (*bufferp != NULL) {
		if (isnew > 0 ||
		    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(*bufferp);
			*bufferp = NULL;
		}
	}
	return(get_ondisk(buf_offset, bufferp, isnew));
}
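
/*
 * Typical call pattern (an illustrative sketch): callers thread a single
 * buffer_info pointer through consecutive lookups so that accesses which
 * land in the same underlying buffer reuse the cached reference:
 *
 *	struct buffer_info *buffer = NULL;
 *	struct hammer_blockmap_layer1 *layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	...
 *	layer1 = get_buffer_data(other_offset, &buffer, 0);
 *	rel_buffer(buffer);
 */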

/*
 * Retrieve a pointer to a B-Tree node given a node offset.  Any buffer
 * held in *bufferp is released and a referenced buffer containing the
 * node is loaded in its place.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
	if (*bufferp != NULL) {
		rel_buffer(*bufferp);
		*bufferp = NULL;
	}
	return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to buffer data given a buffer offset.  If *bufferp
 * is NULL a buffer is acquired, otherwise the existing buffer is used.
 */
static __inline
void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
	int isnew)
{
	struct buffer_info *buffer;

	buffer = *bufferp;
	if (buffer == NULL) {
		buffer = *bufferp = get_buffer(buf_offset, isnew);
		if (buffer == NULL)
			return(NULL);
	}

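	/*
	 * Only the offset bits below HAMMER_BUFSIZE select the byte
	 * within the buffer; the buffer itself was located above.
	 */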
	return((char *)buffer->ondisk +
		((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp,
		    struct buffer_info **data_bufferp)
{
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, data_bufferp);
	bzero(node, sizeof(*node));
	return (node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
			      offp, data_bufferp);
	bzero(data, data_len);
	return (data);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}
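
/*
 * The freemap is a two-level radix tree: each layer1 entry points at a
 * big-block of layer2 entries, and each layer2 entry tracks the owning
 * zone and free space of a single big-block.  initialize_freemap()
 * below fills in both levels for one volume.
 */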

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BIGBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	if (volume == NULL)
		volume = get_volume(RootVolNo);

	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer1, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer1->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer2, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_BIGBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer1);
		rel_buffer(buffer2);
		rel_volume(root_vol);
	}

	rel_volume(volume);
	return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	u_int32_t seqno;

	/*
	 * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 big blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 500MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 500*1024*1024)
			undo_limit = 500*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
		     ~HAMMER_BIGBLOCK_MASK64;
	if (undo_limit < HAMMER_BIGBLOCK_SIZE)
		undo_limit = HAMMER_BIGBLOCK_SIZE;
	if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

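	/*
	 * Example: a 100GB volume yields ~100MB from the 0.1% rule,
	 * below the 500MB floor, so the UNDO FIFO ends up at 500MB
	 * rounded up to the next big-block multiple.  The result is
	 * always clamped to at most HAMMER_UNDO_LAYER2 big blocks.
	 */
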
	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_BIGBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

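	/*
	 * Stamp each HAMMER_UNDO_ALIGN chunk of the FIFO as a DUMMY
	 * record: a fifo head at the front of the chunk, a fifo tail at
	 * the back, and a CRC covering the head fields preceding the
	 * CRC field XORed with the CRC of the record body.
	 */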
	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
				crc32(head + 1, bytes - sizeof(*head));

		scan += bytes;
	}
	rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;

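	/*
	 * Example: a 100 byte request is rounded up to 112 bytes by the
	 * 16-byte alignment above.  The test below XORs the offsets of
	 * the first and last byte of the request; bits remain above the
	 * buffer mask only when the two fall in different buffers, in
	 * which case next_offset is advanced to the next buffer boundary
	 * and the tail of the current buffer is wasted.
	 */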
again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
				        ~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
					~HAMMER_BIGBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			      HAMMER_BIGBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = HAMMER_ZONE_ENCODE(zone,
			*result_offp & ~HAMMER_OFF_ZONE_MASK);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(volume);
	return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
	buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}
985