xref: /dragonfly/sbin/hammer/ondisk.c (revision 25a2db75)
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

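/*
 * Global option and filesystem state shared by the HAMMER utilities.
 * RootVolNo starts at -1 and is latched to vol_rootvol by the first
 * volume header read in setup_volume().  UseReadBehind/UseReadAhead
 * bound the read-ahead window, in buffers, relative to the buffer
 * being faulted in.
 */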
int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int	RootVolNo = -1;
int	UseReadBehind = -4;
int	UseReadAhead = 4;
int	AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

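/*
 * Hash a buffer offset into one of the HAMMER_BUFLISTS per-volume
 * buffer lists.
 */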
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype,
			 sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid,
				sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew > 0) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume: %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}

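/*
 * Lookup a volume by number.  Returns NULL if the volume does not
 * exist; on success a reference is added.
 */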
struct volume_info *
test_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		return(NULL);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

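/*
 * Lookup a volume by number.  The volume must exist; a missing volume
 * is a fatal error.  On success a reference is added.
 */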
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
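/*
 * isnew > 0 zero-fills the buffer and marks it modified, isnew == 0
 * reads it from disk and may trigger read-ahead, and isnew < 0 marks
 * the fetch as read-ahead initiated.
 */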
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;
	int dora = 0;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
	}
	if (buf_offset == HAMMER_OFF_BAD)
		return(NULL);

	if (AssertOnFailure) {
		assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		       HAMMER_ZONE_RAW_BUFFER);
	}
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = test_volume(vol_no);
	if (volume == NULL) {
		if (AssertOnFailure)
			errx(1, "get_buffer: Volume %d not found!", vol_no);
		return(NULL);
	}

	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer %016llx %016llx\n",
				(long long)orig_offset, (long long)buf_offset);
		}
		buf->buf_offset = buf_offset;
		buf->raw_offset = volume->ondisk->vol_buf_beg +
				  (buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
		dora = (isnew == 0);
		if (isnew < 0)
			buf->flags |= HAMMER_BUFINFO_READAHEAD;
	} else {
		if (isnew >= 0) {
			buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
			hammer_cache_used(&buf->cache);
		}
		++buf->use_count;
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew <= 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->raw_offset);
			if (n != HAMMER_BUFSIZE) {
				if (AssertOnFailure)
					err(1, "get_buffer: %s:%016llx "
					    "Read failed at offset %016llx",
					    volume->name,
					    (long long)buf->buf_offset,
					    (long long)buf->raw_offset);
				bzero(ondisk, HAMMER_BUFSIZE);
			}
		}
	}
	if (isnew > 0) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	if (dora)
		get_buffer_readahead(buf);
	return(buf);
}

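/*
 * Pre-fault buffers around the base buffer, from UseReadBehind buffers
 * behind it to UseReadAhead buffers ahead of it, clamped to the
 * volume's buffer area.  Buffers already present are left alone.
 */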
static void
get_buffer_readahead(struct buffer_info *base)
{
	struct buffer_info *buf;
	struct volume_info *vol;
	hammer_off_t buf_offset;
	int64_t raw_offset;
	int ri = UseReadBehind;
	int re = UseReadAhead;
	int hi;

	raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
	vol = base->volume;

	while (ri < re) {
		if (raw_offset >= vol->ondisk->vol_buf_end)
			break;
		if (raw_offset < vol->ondisk->vol_buf_beg) {
			++ri;
			raw_offset += HAMMER_BUFSIZE;
			continue;
		}
		buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
			     HAMMER_ZONE_RAW_BUFFER |
			     (raw_offset - vol->ondisk->vol_buf_beg);
		hi = buffer_hash(raw_offset);
		TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
			if (buf->raw_offset == raw_offset)
				break;
		}
		if (buf == NULL) {
			buf = get_buffer(buf_offset, -1);
			rel_buffer(buf);
		}
		++ri;
		raw_offset += HAMMER_BUFSIZE;
	}
}

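/*
 * Release a buffer reference.  When the last reference goes away and
 * the hammer cache has scheduled the buffer for deletion it is flushed
 * (if modified), unlinked, and freed.
 */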
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

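/*
 * Return a pointer to the requested data within its buffer, reusing
 * *bufferp when the offset falls inside the buffer already held and
 * swapping in a newly referenced buffer otherwise.
 */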
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew > 0 ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	if (buffer == NULL)
		return (NULL);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  Any
 * buffer previously held in *bufp is released and replaced with a
 * referenced buffer backing the node.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	if (buf) {
		return((void *)((char *)buf->ondisk +
				(int32_t)(node_offset & HAMMER_BUFMASK)));
	} else {
		return(NULL);
	}
}
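
/*
 * Minimal usage sketch (hypothetical caller): walk to a node and
 * release the tracking buffer when done.
 *
 *	struct buffer_info *buf = NULL;
 *	hammer_node_ondisk_t node;
 *
 *	node = get_node(node_offset, &buf);
 *	if (node) {
 *		...examine the node...
 *	}
 *	if (buf)
 *		rel_buffer(buf);
 */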

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}
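
/*
 * Minimal usage sketch (hypothetical caller): allocate space for a
 * record's data, copy it in, and drop the tracking buffer reference.
 *
 *	struct buffer_info *data_buffer = NULL;
 *	hammer_off_t data_off;
 *	void *data;
 *
 *	data = alloc_data_element(&data_off, len, &data_buffer);
 *	if (data)
 *		bcopy(src, data, len);
 *	if (data_buffer)
 *		rel_buffer(data_buffer);
 */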

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will then load each volume's free space into the
 * freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->blocks_free = 0;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	struct buffer_info *buffer = NULL;
	hammer_off_t scan;
	int n;
	int limit_index;
	u_int32_t seqno;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 *
	 * The minimum UNDO fifo size is 500MB, or approximately 1% of
	 * the recommended 50G disk.
	 *
	 * Changing this minimum is rather dangerous as complex filesystem
	 * operations can cause the UNDO FIFO to fill up otherwise.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0) {
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
		if (undo_limit < 500*1024*1024)
			undo_limit = 500*1024*1024;
	}
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
							HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}

	/*
	 * Pre-initialize the UNDO blocks (HAMMER version 4+)
	 */
	printf("initializing the undo map (%jd MB)\n",
		(intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
		(1024 * 1024));

	scan = blockmap->first_offset;
	seqno = 0;

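	/*
	 * Each HAMMER_UNDO_ALIGN chunk gets a dummy FIFO record: a head
	 * at the front of the chunk, a tail at the very end, and a CRC
	 * covering the record.
	 */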
	while (scan < blockmap->alloc_offset) {
		hammer_fifo_head_t head;
		hammer_fifo_tail_t tail;
		int isnew;
		int bytes = HAMMER_UNDO_ALIGN;

		isnew = ((scan & HAMMER_BUFMASK64) == 0);
		head = get_buffer_data(scan, &buffer, isnew);
		buffer->cache.modified = 1;
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		bzero(head, bytes);
		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno++;

		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
				crc32(head + 1, bytes - sizeof(*head));

		scan += bytes;
	}
	if (buffer)
		rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 15) & ~15;

again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset = (blockmap->next_offset +
					 HAMMER_LARGEBLOCK_SIZE) &
					~HAMMER_LARGEBLOCK_MASK64;
		goto again;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			      HAMMER_LARGEBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

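/*
 * Write back every buffer on the volume's hash lists, then the volume
 * header itself.
 */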
void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
	buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}