/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.25 2008/08/21 23:28:43 thomas Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int	UsingSuperClusters;
int	NumVolumes;
int	RootVolNo = -1;
int	UseReadBehind = -4;
int	UseReadAhead = 4;
int	AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

/*
 * Look up the requested information structure and its related on-disk
 * buffer.  Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew > 0) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume: %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
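
/*
 * Illustrative usage sketch (not compiled in): a front-end such as the
 * hammer utility opens every volume of a filesystem before touching any
 * buffers.  The device paths and the O_RDONLY policy here are
 * hypothetical; with isnew == 0 the vol_no argument is taken from the
 * on-disk header, so -1 is acceptable.
 */
#if 0
static void
example_open_volumes(void)
{
	struct volume_info *root;

	setup_volume(-1, "/dev/da0s1a", 0, O_RDONLY);
	setup_volume(-1, "/dev/da1s1a", 0, O_RDONLY);

	/* RootVolNo was learned from the volume headers above */
	root = get_volume(RootVolNo);
	rel_volume(root);
}
#endif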

struct volume_info *
test_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		return(NULL);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
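
/*
 * Reference-count discipline, as a sketch (not compiled in): every
 * successful test_volume()/get_volume() bumps cache.refs and must be
 * balanced by exactly one rel_volume() when the caller is done.
 */
#if 0
static void
example_volume_refs(int32_t vol_no)
{
	struct volume_info *vol;

	vol = get_volume(vol_no);	/* ++vol->cache.refs */
	/* ... inspect vol->ondisk ... */
	rel_volume(vol);		/* --vol->cache.refs */
}
#endif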

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;
	int dora = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
	}
	if (buf_offset == HAMMER_OFF_BAD)
		return(NULL);

	if (AssertOnFailure) {
		assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		       HAMMER_ZONE_RAW_BUFFER);
	}
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = test_volume(vol_no);
	if (volume == NULL) {
		if (AssertOnFailure)
			errx(1, "get_buffer: Volume %d not found!", vol_no);
		return(NULL);
	}

	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer %016llx %016llx\n",
				(long long)orig_offset, (long long)buf_offset);
		}
		buf->buf_offset = buf_offset;
		buf->raw_offset = volume->ondisk->vol_buf_beg +
				  (buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
		dora = (isnew == 0);
		if (isnew < 0)
			buf->flags |= HAMMER_BUFINFO_READAHEAD;
	} else {
		if (isnew >= 0) {
			buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
			hammer_cache_used(&buf->cache);
		}
		++buf->use_count;
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->raw_offset);
			if (n != HAMMER_BUFSIZE) {
				if (AssertOnFailure) {
					err(1, "get_buffer: %s:%016llx "
					    "Read failed at offset %016llx",
					    volume->name,
					    (long long)buf->buf_offset,
					    (long long)buf->raw_offset);
				}
				bzero(ondisk, HAMMER_BUFSIZE);
			}
		}
	}
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (dora)
		get_buffer_readahead(buf);
	return(buf);
}
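
/*
 * The isnew argument selects three behaviors, sketched below (not
 * compiled in): isnew > 0 returns a zeroed buffer already marked
 * modified, isnew == 0 reads the buffer from the media and triggers
 * read-ahead, and isnew < 0 reads the buffer but flags it
 * HAMMER_BUFINFO_READAHEAD so the cache may reclaim it early.
 */
#if 0
static void
example_get_buffer(hammer_off_t buf_offset)
{
	struct buffer_info *buf;

	buf = get_buffer(buf_offset, 0);	/* read + read-ahead */
	if (buf) {
		/* ... examine buf->ondisk ... */
		rel_buffer(buf);
	}
}
#endif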

static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;
	int hi;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
			     HAMMER_ZONE_RAW_BUFFER |
			     (raw_offset - vol->ondisk->vol_buf_beg);
		hi = buffer_hash(raw_offset);
		TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
			if (buf->raw_offset == raw_offset)
				break;
		}
		if (buf == NULL) {
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}

void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew > 0 ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	if (buffer == NULL)
		return (NULL);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
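
/*
 * Typical cursor usage for get_buffer_data(), as a sketch (not compiled
 * in): the caller carries one buffer_info pointer across consecutive
 * lookups and the underlying buffer is only swapped when a request
 * crosses into a different HAMMER_BUFSIZE-aligned buffer.  The caller
 * releases the final reference itself, as format_freemap() does.
 */
#if 0
static void
example_cursor(hammer_off_t base)
{
	struct buffer_info *buffer = NULL;
	void *ptr;
	int i;

	for (i = 0; i < 4; ++i) {
		ptr = get_buffer_data(base + i * 64, &buffer, 0);
		/* all four pointers land in the same cached buffer */
	}
	rel_buffer(buffer);
}
#endif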

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The buffer
 * previously referenced by *bufp, if any, is released and a referenced
 * buffer for the node is loaded into *bufp.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	if (buf) {
		return((void *)((char *)buf->ondisk +
				(int32_t)(node_offset & HAMMER_BUFMASK)));
	} else {
		return(NULL);
	}
}
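
/*
 * Descent sketch for get_node() (not compiled in): the same bufp cursor
 * is reused at each level and get_node() releases the previous buffer
 * before loading the next.  How a child offset is chosen from the node
 * is left out here.
 */
#if 0
static void
example_btree_descent(hammer_off_t node_offset)
{
	struct buffer_info *bufp = NULL;
	hammer_node_ondisk_t node;

	while (node_offset != 0) {
		node = get_node(node_offset, &bufp);
		if (node == NULL)
			break;
		node_offset = 0;	/* ... pick a child offset ... */
	}
	if (bufp)
		rel_buffer(bufp);
}
#endif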

/*
 * Allocate HAMMER elements: B-Tree nodes, data storage, and record
 * elements.
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}
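
/*
 * Allocation sketch (not compiled in): B-Tree nodes come out of the
 * B-Tree zone, sub-buffer records out of the small-data zone, and
 * exactly-one-buffer records out of the large-data zone.  The returned
 * pointers stay valid because the underlying buffer references are
 * intentionally kept.
 */
#if 0
static void
example_alloc(void)
{
	struct buffer_info *data_buffer = NULL;
	hammer_node_ondisk_t node;
	hammer_off_t node_off;
	hammer_off_t data_off;
	void *data;

	node = alloc_btree_element(&node_off);
	data = alloc_data_element(&data_off, 128, &data_buffer);
	/* ... fill in node and data, flush later ... */
}
#endif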

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will then load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
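
/*
 * Worked example of the two-level freemap addressing used above, as a
 * sketch (not compiled in).  Each layer1 entry spans
 * HAMMER_BLOCKMAP_LAYER2 bytes of device space; each layer2 entry spans
 * one HAMMER_LARGEBLOCK_SIZE (8MB) big-block.  The same buffer cursor
 * can serve both dives, as alloc_bigblock() does.
 */
#if 0
static void
example_freemap_dive(hammer_off_t layer1_base, hammer_off_t phys_offset)
{
	struct buffer_info *buffer = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	layer1 = get_buffer_data(layer1_base +
				 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset),
				 &buffer, 0);
	layer2 = get_buffer_data(layer1->phys_offset +
				 HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset),
				 &buffer, 0);
	/* layer2->zone / layer2->bytes_free now describe the big-block */
	rel_buffer(buffer);
}
#endif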

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	u_int32_t seqno;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 100MB.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 100*1024*1024)
			undo_limit = 100*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
				crc32(head + 1, bytes - sizeof(*head));

		scan += bytes;
	}
	if (buffer)
		rel_buffer(buffer);
}
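
/*
 * Layout sketch of one pre-initialized UNDO record (not compiled in):
 * every HAMMER_UNDO_ALIGN chunk carries a fifo head up front and a fifo
 * tail in its last bytes, and hdr_crc covers the head up to
 * HAMMER_FIFO_HEAD_CRCOFF xor'd with the CRC of the remainder.  A
 * checker would recompute it the same way format_undomap() stores it:
 */
#if 0
static int
example_undo_crc_ok(hammer_fifo_head_t head, int bytes)
{
	u_int32_t crc;

	crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
	      crc32(head + 1, bytes - sizeof(*head));
	return (head->hdr_crc == crc);
}
#endif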

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;

again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset =
			(blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
			~HAMMER_LARGEBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			      HAMMER_LARGEBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}
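
/*
 * Boundary arithmetic sketch (not compiled in): if the first and last
 * byte of a proposed allocation differ in any bit above HAMMER_BUFMASK64
 * the allocation would straddle two buffers, so next_offset is advanced
 * to the next buffer boundary, mirroring the test at the top of
 * alloc_blockmap().
 */
#if 0
static hammer_off_t
example_skip_boundary(hammer_off_t next_offset, int bytes)
{
	if ((next_offset ^ (next_offset + bytes - 1)) & ~HAMMER_BUFMASK64)
		next_offset = (next_offset + bytes) & ~HAMMER_BUFMASK64;
	return(next_offset);
}
#endif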

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
	buffer->cache.modified = 0;
}
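
/*
 * Typical write-out sequence, as a sketch (not compiled in): callers
 * mutate in-memory structures, mark the owning cache entries modified,
 * and defer media writes to one final pass.  Note that flush_volume()
 * writes every cached buffer, not only the modified ones.
 */
#if 0
static void
example_commit(void)
{
	/* ... allocate and fill nodes, records and data ... */
	flush_all_volumes();
}
#endif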

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}