/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.14 2008/03/18 05:21:53 dillon Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
			hammer_off_t owner);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);


uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int     UsingSuperClusters;
int     NumVolumes;
int	RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

/*
 * Look up the requested volume information structure and its related
 * on-disk buffer.  Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	TAILQ_INIT(&vol->buffer_list);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}
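
/*
 * Example (illustrative sketch, not part of the original file): a
 * utility normally opens every volume up front.  The names nvols and
 * paths[] below are hypothetical.
 *
 *	int32_t i;
 *
 *	for (i = 0; i < nvols; ++i)
 *		setup_volume(-1, paths[i], 0, O_RDONLY);
 *
 * With isnew == 0 the vol_no argument is overridden by the volume
 * number stored in the on-disk header, so -1 is a safe dummy value.
 */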

struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}
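
/*
 * Example (illustrative): get_volume()/rel_volume() bracket any use of
 * a volume so that its cache reference count returns to zero:
 *
 *	struct volume_info *root_vol;
 *
 *	root_vol = get_volume(RootVolNo);
 *	... inspect or modify root_vol->ondisk ...
 *	rel_volume(root_vol);
 */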

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	int vol_no;
	int zone;
	int n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	TAILQ_FOREACH(buf, &volume->buffer_list, entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
					(buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_list, buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew == 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				       "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}

void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_list, buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
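
/*
 * Example (illustrative): scanning a range of offsets with a cached
 * *bufferp; the underlying buffer is only looked up again when the
 * access crosses a HAMMER_BUFSIZE boundary.  base and count below are
 * hypothetical.
 *
 *	struct buffer_info *buffer = NULL;
 *	struct hammer_blockmap_layer1 *layer1;
 *	int i;
 *
 *	for (i = 0; i < count; ++i) {
 *		layer1 = get_buffer_data(base + i * sizeof(*layer1),
 *					 &buffer, 0);
 *		... inspect *layer1 ...
 *	}
 *	rel_buffer(buffer);
 */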

/*
 * Retrieve a pointer to a B-Tree node given a zone offset.  If *bufp is
 * non-NULL the buffer it references is released first, then a referenced
 * buffer containing the node is loaded into *bufp.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
			(int32_t)(node_offset & HAMMER_BUFMASK)));
}
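
/*
 * Example (illustrative sketch, assuming the vol0_btree_root field from
 * the on-disk volume header): loading the root B-Tree node.
 *
 *	struct volume_info *root_vol = get_volume(RootVolNo);
 *	struct buffer_info *bufp = NULL;
 *	hammer_node_ondisk_t node;
 *
 *	node = get_node(root_vol->ondisk->vol0_btree_root, &bufp);
 *	... walk the node's elements ...
 *	rel_buffer(bufp);
 *	rel_volume(root_vol);
 */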

/*
 * Allocate HAMMER elements - B-Tree nodes, data storage, and record
 * elements.
 *
 * The allocations are zeroed here before being returned, so callers do
 * not need to bzero() them.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}

hammer_record_ondisk_t
alloc_record_element(hammer_off_t *offp, int32_t data_len, void **datap)
{
	struct buffer_info *record_buffer = NULL;
	struct buffer_info *data_buffer = NULL;
	hammer_record_ondisk_t rec;

	rec = alloc_blockmap(HAMMER_ZONE_RECORD_INDEX, sizeof(*rec),
			     offp, &record_buffer);
	bzero(rec, sizeof(*rec));

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		*datap = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else if (data_len) {
		*datap = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
					&rec->base.data_off, &data_buffer);
		rec->base.data_len = data_len;
		bzero(*datap, data_len);
	} else {
		*datap = NULL;
	}
	/* XXX buf not released, ptr remains valid */
	return(rec);
}
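
/*
 * Example (illustrative): allocating a record with a small data payload.
 * Both the record and the data area come back zeroed; len is a
 * hypothetical length <= HAMMER_BUFSIZE.
 *
 *	hammer_record_ondisk_t rec;
 *	hammer_off_t rec_off;
 *	void *data;
 *
 *	rec = alloc_record_element(&rec_off, len, &data);
 *	... fill in rec->base and copy len bytes into data ...
 */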

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL;
 * initialize_freemap() will then load each volume's free space into
 * the freemap.
 *
 * NOTE: The blockmap argument is immediately reassigned to the root
 * volume's freemap blockmap, so its incoming value is unused.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, 0);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.  This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol, 0);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->u.owner = HAMMER_BLOCKMAP_FREE;
			++count;
		} else {
			layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
		}
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (((phys_offset + HAMMER_LARGEBLOCK_SIZE) & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
			layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}
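
/*
 * For reference (a sketch of relationships already encoded in the macros
 * used above, not new definitions): a raw buffer offset resolves through
 * the two freemap layers as
 *
 *	layer1 entry: one per HAMMER_BLOCKMAP_LAYER2 bytes of address space
 *	layer2 entry: one per HAMMER_LARGEBLOCK_SIZE big-block within that
 *
 * HAMMER_BLOCKMAP_LAYER1_OFFSET() selects the layer1 entry and
 * HAMMER_BLOCKMAP_LAYER2_OFFSET() selects the layer2 entry, which is why
 * the bootstrap loop above steps by HAMMER_BLOCKMAP_LAYER2 while the
 * fill loop steps by HAMMER_LARGEBLOCK_SIZE.
 */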

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap
	 */
	if (owner) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, sizeof(*layer1));
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
		layer2->u.owner = owner;
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		buffer->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}
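
/*
 * Example (illustrative): bootstrap callers pass owner == 0 because the
 * freemap is not usable yet; normal callers pass the zone offset that
 * will own the big-block so the freemap is updated.  zone_off below is
 * hypothetical.
 *
 *	hammer_off_t phys;
 *
 *	phys = alloc_bigblock(NULL, zone_off);
 */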

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	const hammer_off_t undo_limit = HAMMER_LARGEBLOCK_SIZE; /* XXX */
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	struct hammer_blockmap_layer2 *layer2;
	int n;
	int limit_index;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);

	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));

	layer2 = &ondisk->vol0_undo_array[0];
	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index < HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		layer2->u.phys_offset = alloc_bigblock(NULL, scan);
		layer2->bytes_free = -1;	/* not used */
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));

		scan += HAMMER_LARGEBLOCK_SIZE;
		++layer2;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer2->bytes_free = -1;
		layer2->entry_crc = crc32(layer2, sizeof(*layer2));
		++layer2;
		++n;
	}
}
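
/*
 * For reference: with undo_limit == HAMMER_LARGEBLOCK_SIZE, limit_index
 * works out to 1, so the loop above allocates exactly one big-block for
 * the UNDO FIFO and marks the remaining vol0_undo_array entries UNAVAIL.
 */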

/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * target bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off)
{
	blockmap->phys_offset = alloc_bigblock(NULL, zone_off);
	blockmap->alloc_offset = zone_off;
	blockmap->first_offset = zone_off;
	blockmap->next_offset = zone_off;
	blockmap->entry_crc = crc32(blockmap, sizeof(*blockmap));
}
577 
578 static
579 void *
580 alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
581 	       struct buffer_info **bufferp)
582 {
583 	struct buffer_info *buffer1 = NULL;
584 	struct buffer_info *buffer2 = NULL;
585 	struct volume_info *volume;
586 	hammer_blockmap_t rootmap;
587 	struct hammer_blockmap_layer1 *layer1;
588 	struct hammer_blockmap_layer2 *layer2;
589 	hammer_off_t layer1_offset;
590 	hammer_off_t layer2_offset;
591 	hammer_off_t bigblock_offset;
592 	void *ptr;
593 
594 	volume = get_volume(RootVolNo);
595 
596 	rootmap = &volume->ondisk->vol0_blockmap[zone];
597 
598 	/*
599 	 * Alignment and buffer-boundary issues
600 	 */
601 	bytes = (bytes + 7) & ~7;
602 	if ((rootmap->phys_offset ^ (rootmap->phys_offset + bytes - 1)) &
603 	    ~HAMMER_BUFMASK64) {
604 		volume->cache.modified = 1;
605 		rootmap->phys_offset = (rootmap->phys_offset + bytes) &
606 				       ~HAMMER_BUFMASK64;
607 	}
608 
609 	/*
610 	 * Dive layer 1
611 	 */
612 	layer1_offset = rootmap->phys_offset +
613 			HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset);
614 
615 	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
616 	if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
617 		buffer1->cache.modified = 1;
618 		bzero(layer1, sizeof(*layer1));
619 		layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
620 		layer1->phys_offset = alloc_bigblock(NULL,
621 						     rootmap->alloc_offset);
622 	}
623 
624 	/*
625 	 * Dive layer 2
626 	 */
627 	layer2_offset = layer1->phys_offset +
628 			HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset);
629 
630 	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
631 
632 	if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
633 		buffer2->cache.modified = 1;
634 		bzero(layer2, sizeof(*layer2));
635 		layer2->u.phys_offset = alloc_bigblock(NULL,
636 						       rootmap->alloc_offset);
637 		layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
638 		--layer1->blocks_free;
639 	}
640 
641 	buffer1->cache.modified = 1;
642 	buffer2->cache.modified = 1;
643 	volume->cache.modified = 1;
644 	layer2->bytes_free -= bytes;
645 	*result_offp = rootmap->alloc_offset;
646 	rootmap->alloc_offset += bytes;
647 	rootmap->next_offset = rootmap->alloc_offset;
648 
649 	bigblock_offset = layer2->u.phys_offset +
650 			  (*result_offp & HAMMER_LARGEBLOCK_MASK);
651 	ptr = get_buffer_data(bigblock_offset, bufferp, 0);
652 	(*bufferp)->cache.modified = 1;
653 
654 	if (buffer1)
655 		rel_buffer(buffer1);
656 	if (buffer2)
657 		rel_buffer(buffer2);
658 
659 	rel_volume(volume);
660 	return(ptr);
661 }
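
/*
 * Example of the alignment rule above (illustrative): requests are
 * rounded up to a multiple of 8 bytes, e.g. a 13-byte request becomes
 *
 *	(13 + 7) & ~7 == 16
 *
 * and an allocation that would straddle a HAMMER_BUFSIZE boundary is
 * pushed forward to the start of the next buffer instead of being split.
 */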

#if 0
/*
 * Reserve space from the FIFO.  Make sure that bytes does not cross a
 * record boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static
hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
		  struct buffer_info **bufp, u_int16_t hdr_type)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t off;
	int32_t aligned_bytes;

	aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
			 HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

	volume = get_volume(RootVolNo);
	off = volume->ondisk->vol0_fifo_end;

	/*
	 * For now don't deal with transitions across buffer boundaries,
	 * only newfs_hammer uses this function.
	 */
	assert((off & ~HAMMER_BUFMASK64) ==
		((off + aligned_bytes) & ~HAMMER_BUFMASK64));

	*bufp = buf = get_buffer(off, 0);

	buf->cache.modified = 1;
	volume->cache.modified = 1;

	head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
	bzero(head, base_bytes);

	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = aligned_bytes;
	head->hdr_seq = volume->ondisk->vol0_next_seq++;

	tail = (void *)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = hdr_type;
	tail->tail_size = aligned_bytes;

	volume->ondisk->vol0_fifo_end += aligned_bytes;
	volume->cache.modified = 1;

	rel_volume(volume);

	return(off);
}

#endif

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;

	TAILQ_FOREACH(buffer, &volume->buffer_list, entry)
		flush_buffer(buffer);
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}
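
/*
 * Example (illustrative): a program using this module typically makes
 * all of its modifications in memory, relying on the cache.modified
 * flags, and writes everything back in one pass before exiting:
 *
 *	... set up volumes, allocate and modify structures ...
 *	flush_all_volumes();
 */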

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}