1 /* 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.23 2008/06/19 23:30:30 dillon Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
#if 0
/* Currently-unused helpers kept for reference (compiled out). */
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

/* Non-zero enables debug tracing on stderr (e.g. in get_buffer()). */
int DebugOpt;

uuid_t Hammer_FSType;		/* filesystem type uuid expected in headers */
uuid_t Hammer_FSId;		/* fsid shared by all volumes of one FS */
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;		/* 0 = auto-size in format_undomap() */
int UsingSuperClusters;
int NumVolumes;
int RootVolNo = -1;		/* -1 until the first volume header is read */
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

/*
 * Hash a raw buffer offset into an index for a volume's buffer_lists[]
 * array.  Buffers are bucketed by HAMMER_BUFSIZE-aligned offset.
 */
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
87 */ 88 struct volume_info * 89 setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags) 90 { 91 struct volume_info *vol; 92 struct volume_info *scan; 93 struct hammer_volume_ondisk *ondisk; 94 int i, n; 95 96 /* 97 * Allocate the volume structure 98 */ 99 vol = malloc(sizeof(*vol)); 100 bzero(vol, sizeof(*vol)); 101 for (i = 0; i < HAMMER_BUFLISTS; ++i) 102 TAILQ_INIT(&vol->buffer_lists[i]); 103 vol->name = strdup(filename); 104 vol->fd = open(filename, oflags); 105 if (vol->fd < 0) { 106 free(vol->name); 107 free(vol); 108 err(1, "setup_volume: %s: Open failed", filename); 109 } 110 111 /* 112 * Read or initialize the volume header 113 */ 114 vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE); 115 if (isnew) { 116 bzero(ondisk, HAMMER_BUFSIZE); 117 } else { 118 n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0); 119 if (n != HAMMER_BUFSIZE) { 120 err(1, "setup_volume: %s: Read failed at offset 0", 121 filename); 122 } 123 vol_no = ondisk->vol_no; 124 if (RootVolNo < 0) { 125 RootVolNo = ondisk->vol_rootvol; 126 } else if (RootVolNo != (int)ondisk->vol_rootvol) { 127 errx(1, "setup_volume: %s: root volume disagreement: " 128 "%d vs %d", 129 vol->name, RootVolNo, ondisk->vol_rootvol); 130 } 131 132 if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) { 133 errx(1, "setup_volume: %s: Header does not indicate " 134 "that this is a hammer volume", vol->name); 135 } 136 if (TAILQ_EMPTY(&VolList)) { 137 Hammer_FSId = vol->ondisk->vol_fsid; 138 } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) { 139 errx(1, "setup_volume: %s: FSId does match other " 140 "volumes!", vol->name); 141 } 142 } 143 vol->vol_no = vol_no; 144 145 if (isnew) { 146 /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/ 147 vol->cache.modified = 1; 148 } 149 150 /* 151 * Link the volume structure in 152 */ 153 TAILQ_FOREACH(scan, &VolList, entry) { 154 if (scan->vol_no == vol_no) { 155 errx(1, "setup_volume %s: Duplicate volume number %d " 
156 "against %s", filename, vol_no, scan->name); 157 } 158 } 159 TAILQ_INSERT_TAIL(&VolList, vol, entry); 160 return(vol); 161 } 162 163 struct volume_info * 164 get_volume(int32_t vol_no) 165 { 166 struct volume_info *vol; 167 168 TAILQ_FOREACH(vol, &VolList, entry) { 169 if (vol->vol_no == vol_no) 170 break; 171 } 172 if (vol == NULL) 173 errx(1, "get_volume: Volume %d does not exist!", vol_no); 174 ++vol->cache.refs; 175 /* not added to or removed from hammer cache */ 176 return(vol); 177 } 178 179 void 180 rel_volume(struct volume_info *volume) 181 { 182 /* not added to or removed from hammer cache */ 183 --volume->cache.refs; 184 } 185 186 /* 187 * Acquire the specified buffer. 188 */ 189 struct buffer_info * 190 get_buffer(hammer_off_t buf_offset, int isnew) 191 { 192 void *ondisk; 193 struct buffer_info *buf; 194 struct volume_info *volume; 195 hammer_off_t orig_offset = buf_offset; 196 int vol_no; 197 int zone; 198 int hi, n; 199 200 zone = HAMMER_ZONE_DECODE(buf_offset); 201 if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) { 202 buf_offset = blockmap_lookup(buf_offset, NULL, NULL); 203 } 204 assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER); 205 vol_no = HAMMER_VOL_DECODE(buf_offset); 206 volume = get_volume(vol_no); 207 buf_offset &= ~HAMMER_BUFMASK64; 208 209 hi = buffer_hash(buf_offset); 210 211 TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) { 212 if (buf->buf_offset == buf_offset) 213 break; 214 } 215 if (buf == NULL) { 216 buf = malloc(sizeof(*buf)); 217 bzero(buf, sizeof(*buf)); 218 if (DebugOpt) { 219 fprintf(stderr, "get_buffer %016llx %016llx\n", 220 orig_offset, buf_offset); 221 } 222 buf->buf_offset = buf_offset; 223 buf->buf_disk_offset = volume->ondisk->vol_buf_beg + 224 (buf_offset & HAMMER_OFF_SHORT_MASK); 225 buf->volume = volume; 226 TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry); 227 ++volume->cache.refs; 228 buf->cache.u.buffer = buf; 229 hammer_cache_add(&buf->cache, ISBUFFER); 230 } 231 ++buf->cache.refs; 
232 hammer_cache_flush(); 233 if ((ondisk = buf->ondisk) == NULL) { 234 buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE); 235 if (isnew == 0) { 236 n = pread(volume->fd, ondisk, HAMMER_BUFSIZE, 237 buf->buf_disk_offset); 238 if (n != HAMMER_BUFSIZE) { 239 err(1, "get_buffer: %s:%016llx Read failed at " 240 "offset %lld", 241 volume->name, buf->buf_offset, 242 buf->buf_disk_offset); 243 } 244 } 245 } 246 if (isnew) { 247 bzero(ondisk, HAMMER_BUFSIZE); 248 buf->cache.modified = 1; 249 } 250 return(buf); 251 } 252 253 void 254 rel_buffer(struct buffer_info *buffer) 255 { 256 struct volume_info *volume; 257 int hi; 258 259 assert(buffer->cache.refs > 0); 260 if (--buffer->cache.refs == 0) { 261 if (buffer->cache.delete) { 262 hi = buffer_hash(buffer->buf_offset); 263 volume = buffer->volume; 264 if (buffer->cache.modified) 265 flush_buffer(buffer); 266 TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry); 267 hammer_cache_del(&buffer->cache); 268 free(buffer->ondisk); 269 free(buffer); 270 rel_volume(volume); 271 } 272 } 273 } 274 275 void * 276 get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp, 277 int isnew) 278 { 279 struct buffer_info *buffer; 280 281 if ((buffer = *bufferp) != NULL) { 282 if (isnew || 283 ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) { 284 rel_buffer(buffer); 285 buffer = *bufferp = NULL; 286 } 287 } 288 if (buffer == NULL) 289 buffer = *bufferp = get_buffer(buf_offset, isnew); 290 return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK)); 291 } 292 293 /* 294 * Retrieve a pointer to a B-Tree node given a cluster offset. The underlying 295 * bufp is freed if non-NULL and a referenced buffer is loaded into it. 
296 */ 297 hammer_node_ondisk_t 298 get_node(hammer_off_t node_offset, struct buffer_info **bufp) 299 { 300 struct buffer_info *buf; 301 302 if (*bufp) 303 rel_buffer(*bufp); 304 *bufp = buf = get_buffer(node_offset, 0); 305 return((void *)((char *)buf->ondisk + 306 (int32_t)(node_offset & HAMMER_BUFMASK))); 307 } 308 309 /* 310 * Allocate HAMMER elements - btree nodes, data storage, and record elements 311 * 312 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned 313 * item and zero's out the remainder, so don't bzero() it. 314 */ 315 void * 316 alloc_btree_element(hammer_off_t *offp) 317 { 318 struct buffer_info *buffer = NULL; 319 hammer_node_ondisk_t node; 320 321 node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node), 322 offp, &buffer); 323 bzero(node, sizeof(*node)); 324 /* XXX buffer not released, pointer remains valid */ 325 return(node); 326 } 327 328 void * 329 alloc_data_element(hammer_off_t *offp, int32_t data_len, 330 struct buffer_info **data_bufferp) 331 { 332 void *data; 333 334 if (data_len >= HAMMER_BUFSIZE) { 335 assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */ 336 data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len, 337 offp, data_bufferp); 338 bzero(data, data_len); 339 } else if (data_len) { 340 data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len, 341 offp, data_bufferp); 342 bzero(data, data_len); 343 } else { 344 data = NULL; 345 } 346 return (data); 347 } 348 349 /* 350 * Format a new freemap. Set all layer1 entries to UNAVAIL. The initialize 351 * code will load each volume's freemap. 
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	/*
	 * Allocate the bigblock holding the layer1 array, then mark every
	 * layer1 entry UNAVAIL.  A fresh buffer is requested (isnew) at each
	 * buffer boundary of the array.
	 */
	layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		/*
		 * NOTE(review): the CRC is computed before blocks_free is
		 * stored; blocks_free is already 0 from the bzero() so the
		 * CRC is still consistent.
		 */
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		layer1->blocks_free = 0;
	}
	rel_buffer(buffer);

	/*
	 * NOTE(review): the blockmap parameter is ignored; it is immediately
	 * re-pointed at the root volume's freemap blockmap entry — confirm
	 * with callers whether the parameter is still needed.
	 */
	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;	/* layer1 buffer cache */
	struct buffer_info *buffer2 = NULL;	/* layer2 buffer cache */
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	/* round the volume's free-space end up to a layer2 boundary */
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.   This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol,
						HAMMER_ZONE_FREEMAP_INDEX);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.  Each layer2 entry is classified as:
	 *  - bootstrap-allocated (below vol_free_off): owned by the freemap,
	 *  - free (between vol_free_off and vol_free_end): counted,
	 *  - past the end of the volume: UNAVAIL.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		bzero(layer2, sizeof(*layer2));
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 *
			 * NOTE(review): zone was just zeroed by the bzero()
			 * above, so this test is always true here.
			 */
			if (layer2->zone == 0) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
				layer2->bytes_free = 0;
			}
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++count;
			modified1 = 1;
		} else {
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1: recompute the CRC if blocks_free
		 * changed above.
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;	/* non-zero if we took a ref on the root volume */

	if (volume == NULL) {
		/* NULL means allocate from the root volume */
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap: take the bigblock out of layer1's free count
	 * and mark its layer2 entry fully consumed by this zone.  Skipped
	 * entirely while bootstrapping the freemap itself.
	 */
	if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->zone == 0);
		layer2->zone = zone;
		layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
		layer2->bytes_free = 0;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	int n;
	int limit_index;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0)
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	/* NOTE(review): scan is advanced below but never otherwise used */
	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	/*
	 * Back the in-use portion of the undo array with real bigblocks
	 * and mark the remainder UNAVAIL.
	 */
	for (n = 0; n < limit_index; ++n) {
		ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
				HAMMER_ZONE_UNDO_INDEX);
		scan += HAMMER_LARGEBLOCK_SIZE;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
		++n;
	}
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = 0;
	blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
				 HAMMER_SHORT_OFF_ENCODE(-1);
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;	/* layer1 buffer */
	struct buffer_info *buffer2 = NULL;	/* layer2 buffer */
	struct volume_info *volume;
	hammer_blockmap_t blockmap;
	hammer_blockmap_t freemap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t zone2_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	blockmap = &volume->ondisk->vol0_blockmap[zone];
	freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 * Allocations are 16-byte aligned.
	 */
	bytes = (bytes + 15) & ~15;

again:
	if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		blockmap->next_offset = (blockmap->next_offset + bytes) &
					~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1.  For now we can't allocate data outside of volume 0.
	 */
	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

	if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		fprintf(stderr, "alloc_blockmap: ran out of space!\n");
		exit(1);
	}

	/*
	 * If we are entering a new bigblock assign ownership to our
	 * zone.  If the bigblock is owned by another zone skip it and
	 * retry at the start of the next bigblock.
	 */
	if (layer2->zone == 0) {
		--layer1->blocks_free;
		layer2->zone = zone;
		assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
		assert(layer2->append_off == 0);
	}
	if (layer2->zone != zone) {
		blockmap->next_offset = (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) & ~HAMMER_LARGEBLOCK_MASK64;
		goto again;
	}

	/*
	 * Commit the allocation: consume the bytes, advance the iterator,
	 * and refresh both layer CRCs.
	 */
	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	assert(layer2->append_off ==
	       (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
	layer2->bytes_free -= bytes;
	*result_offp = blockmap->next_offset;
	blockmap->next_offset += bytes;
	layer2->append_off = (int)blockmap->next_offset &
			     HAMMER_LARGEBLOCK_MASK;

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	/*
	 * Map the zone offset back to a raw-buffer offset so the caller
	 * gets a usable pointer into the backing buffer.
	 */
	zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
			HAMMER_ZONE_ENCODE(zone, 0);

	ptr = get_buffer_data(zone2_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

/*
 * Flush all of a volume's dirty buffers, then its header.
 */
void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

/*
 * Write a single buffer back to its volume and clear its modified flag.
 */
void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}

#if 0
/*
771 * Generic buffer initialization 772 */ 773 static void 774 init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type) 775 { 776 head->hdr_signature = HAMMER_HEAD_SIGNATURE; 777 head->hdr_type = hdr_type; 778 head->hdr_size = 0; 779 head->hdr_crc = 0; 780 head->hdr_seq = 0; 781 } 782 783 #endif 784 785 #if 0 786 /* 787 * Core I/O operations 788 */ 789 static void 790 readhammerbuf(struct volume_info *vol, void *data, int64_t offset) 791 { 792 ssize_t n; 793 794 n = pread(vol->fd, data, HAMMER_BUFSIZE, offset); 795 if (n != HAMMER_BUFSIZE) 796 err(1, "Read volume %d (%s)", vol->vol_no, vol->name); 797 } 798 799 #endif 800 801 static void 802 writehammerbuf(struct volume_info *vol, const void *data, int64_t offset) 803 { 804 ssize_t n; 805 806 n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset); 807 if (n != HAMMER_BUFSIZE) 808 err(1, "Write volume %d (%s)", vol->vol_no, vol->name); 809 } 810 811 void 812 panic(const char *ctl, ...) 813 { 814 va_list va; 815 816 va_start(va, ctl); 817 vfprintf(stderr, ctl, va); 818 va_end(va); 819 fprintf(stderr, "\n"); 820 exit(1); 821 } 822 823