/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 */ 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/fcntl.h> 39 #include <sys/buf.h> 40 #include <sys/proc.h> 41 #include <sys/namei.h> 42 #include <sys/mount.h> 43 #include <sys/vnode.h> 44 #include <sys/mountctl.h> 45 46 #include "hammer2.h" 47 48 static int hammer2_freemap_try_alloc(hammer2_trans_t *trans, 49 hammer2_chain_t **parentp, hammer2_blockref_t *bref, 50 int radix, hammer2_off_t bpref, hammer2_off_t *bnext); 51 static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp, 52 hammer2_key_t key, hammer2_chain_t *chain); 53 static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, 54 hammer2_bmap_data_t *bmap, 55 int n, int radix, hammer2_key_t *basep); 56 static int hammer2_freemap_iterate(hammer2_trans_t *trans, 57 hammer2_chain_t **parentp, hammer2_chain_t **chainp, 58 hammer2_off_t bpref, hammer2_off_t *bnextp); 59 60 static __inline 61 int 62 hammer2_freemapradix(int radix) 63 { 64 return(radix); 65 } 66 67 /* 68 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF 69 * bref. Return a combined media offset and physical size radix. Freemap 70 * chains use fixed storage offsets in the 4MB reserved area at the 71 * beginning of each 2GB zone 72 * 73 * Rotate between four possibilities. Theoretically this means we have three 74 * good freemaps in case of a crash which we can use as a base for the fixup 75 * scan at mount-time. 76 */ 77 #define H2FMBASE(key, radix) ((key) & ~(((hammer2_off_t)1 << (radix)) - 1)) 78 #define H2FMSHIFT(radix) ((hammer2_off_t)1 << (radix)) 79 80 static 81 int 82 hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref, 83 int radix) 84 { 85 hammer2_off_t off; 86 size_t bytes; 87 88 /* 89 * Physical allocation size -> radix. Typically either 256 for 90 * a level 0 freemap leaf or 65536 for a level N freemap node. 91 * 92 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage. 
93 * Do not use hammer2_allocsize() here as it has a min cap. 94 */ 95 bytes = 1 << radix; 96 97 /* 98 * Adjust by HAMMER2_ZONE_FREEMAP_{A,B,C,D} using the existing 99 * offset as a basis. Start in zone A if previously unallocated. 100 */ 101 if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) { 102 off = HAMMER2_ZONE_FREEMAP_A; 103 } else { 104 off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX & 105 (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 106 off = off / HAMMER2_PBUFSIZE; 107 KKASSERT(off >= HAMMER2_ZONE_FREEMAP_A); 108 KKASSERT(off < HAMMER2_ZONE_FREEMAP_D + 4); 109 110 if (off >= HAMMER2_ZONE_FREEMAP_D) 111 off = HAMMER2_ZONE_FREEMAP_A; 112 else if (off >= HAMMER2_ZONE_FREEMAP_C) 113 off = HAMMER2_ZONE_FREEMAP_D; 114 else if (off >= HAMMER2_ZONE_FREEMAP_B) 115 off = HAMMER2_ZONE_FREEMAP_C; 116 else 117 off = HAMMER2_ZONE_FREEMAP_B; 118 } 119 off = off * HAMMER2_PBUFSIZE; 120 121 /* 122 * Calculate the block offset of the reserved block. This will 123 * point into the 4MB reserved area at the base of the appropriate 124 * 2GB zone, once added to the FREEMAP_x selection above. 
125 */ 126 switch(bref->keybits) { 127 /* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */ 128 case HAMMER2_FREEMAP_LEVEL4_RADIX: /* 2EB */ 129 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 130 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 131 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) + 132 HAMMER2_ZONEFM_LEVEL4 * HAMMER2_PBUFSIZE; 133 break; 134 case HAMMER2_FREEMAP_LEVEL3_RADIX: /* 2PB */ 135 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 136 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 137 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) + 138 HAMMER2_ZONEFM_LEVEL3 * HAMMER2_PBUFSIZE; 139 break; 140 case HAMMER2_FREEMAP_LEVEL2_RADIX: /* 2TB */ 141 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 142 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 143 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) + 144 HAMMER2_ZONEFM_LEVEL2 * HAMMER2_PBUFSIZE; 145 break; 146 case HAMMER2_FREEMAP_LEVEL1_RADIX: /* 2GB */ 147 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF); 148 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 149 off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 150 HAMMER2_ZONEFM_LEVEL1 * HAMMER2_PBUFSIZE; 151 break; 152 default: 153 panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits); 154 /* NOT REACHED */ 155 break; 156 } 157 bref->data_off = off | radix; 158 return (0); 159 } 160 161 /* 162 * Normal freemap allocator 163 * 164 * Use available hints to allocate space using the freemap. Create missing 165 * freemap infrastructure on-the-fly as needed (including marking initial 166 * allocations using the iterator as allocated, instantiating new 2GB zones, 167 * and dealing with the end-of-media edge case). 168 * 169 * ip and bpref are only used as a heuristic to determine locality of 170 * reference. bref->key may also be used heuristically. 
171 */ 172 int 173 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, 174 hammer2_blockref_t *bref, size_t bytes) 175 { 176 hammer2_chain_t *parent; 177 hammer2_off_t bpref; 178 hammer2_off_t bnext; 179 int radix; 180 int error; 181 182 /* 183 * Validate the allocation size. It must be a power of 2. 184 * 185 * For now require that the caller be aware of the minimum 186 * allocation (1K). 187 */ 188 radix = hammer2_getradix(bytes); 189 KKASSERT((size_t)1 << radix == bytes); 190 191 /* 192 * Freemap blocks themselves are simply assigned from the reserve 193 * area, not allocated from the freemap. 194 */ 195 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE || 196 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { 197 return(hammer2_freemap_reserve(hmp, bref, radix)); 198 } 199 200 /* 201 * Normal allocations 202 */ 203 KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC); 204 205 /* 206 * Calculate the starting point for our allocation search. 207 * 208 * Each freemap leaf is dedicated to a specific freemap_radix. 209 * The freemap_radix can be more fine-grained than the device buffer 210 * radix which results in inodes being grouped together in their 211 * own segment, terminal-data (16K or less) and initial indirect 212 * block being grouped together, and then full-indirect and full-data 213 * blocks (64K) being grouped together. 214 * 215 * The single most important aspect of this is the inode grouping 216 * because that is what allows 'find' and 'ls' and other filesystem 217 * topology operations to run fast. 218 */ 219 #if 0 220 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) 221 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX; 222 else if (trans->tmp_bpref) 223 bpref = trans->tmp_bpref; 224 else if (trans->tmp_ip) 225 bpref = trans->tmp_ip->chain->bref.data_off; 226 else 227 #endif 228 KKASSERT(radix >= 0 && radix <= HAMMER2_MAX_RADIX); 229 bpref = hmp->heur_freemap[radix]; 230 231 /* 232 * Make sure bpref is in-bounds. 
It's ok if bpref covers a zone's 233 * reserved area, the try code will iterate past it. 234 */ 235 if (bpref > hmp->voldata.volu_size) 236 bpref = hmp->voldata.volu_size - 1; 237 238 /* 239 * Iterate the freemap looking for free space before and after. 240 */ 241 parent = &hmp->fchain; 242 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 243 error = EAGAIN; 244 bnext = bpref; 245 246 while (error == EAGAIN) { 247 error = hammer2_freemap_try_alloc(trans, &parent, bref, 248 radix, bpref, &bnext); 249 } 250 hmp->heur_freemap[radix] = bnext; 251 hammer2_chain_unlock(parent); 252 253 return (error); 254 } 255 256 static int 257 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp, 258 hammer2_blockref_t *bref, int radix, 259 hammer2_off_t bpref, hammer2_off_t *bnextp) 260 { 261 hammer2_mount_t *hmp = (*parentp)->hmp; 262 hammer2_off_t l0size; 263 hammer2_off_t l1size; 264 hammer2_off_t l1mask; 265 hammer2_chain_t *chain; 266 hammer2_off_t key; 267 size_t bytes; 268 int error = 0; 269 270 271 /* 272 * Calculate the number of bytes being allocated, the number 273 * of contiguous bits of bitmap being allocated, and the bitmap 274 * mask. 275 * 276 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the 277 * mask calculation. 278 */ 279 bytes = (size_t)1 << radix; 280 281 /* 282 * Lookup the level1 freemap chain, creating and initializing one 283 * if necessary. Intermediate levels will be created automatically 284 * when necessary by hammer2_chain_create(). 
285 */ 286 key = H2FMBASE(*bnextp, HAMMER2_FREEMAP_LEVEL1_RADIX); 287 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 288 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 289 l1mask = l1size - 1; 290 291 chain = hammer2_chain_lookup(parentp, key, key + l1mask, 292 HAMMER2_LOOKUP_FREEMAP | 293 HAMMER2_LOOKUP_ALWAYS | 294 HAMMER2_LOOKUP_MATCHIND/*XXX*/); 295 if (chain == NULL) { 296 /* 297 * Create the missing leaf, be sure to initialize 298 * the auxillary freemap tracking information in 299 * the bref.check.freemap structure. 300 */ 301 #if 0 302 kprintf("freemap create L1 @ %016jx bpref %016jx\n", 303 key, bpref); 304 #endif 305 error = hammer2_chain_create(trans, parentp, &chain, 306 key, HAMMER2_FREEMAP_LEVEL1_RADIX, 307 HAMMER2_BREF_TYPE_FREEMAP_LEAF, 308 HAMMER2_FREEMAP_LEVELN_PSIZE); 309 if (error == 0) { 310 hammer2_chain_modify(trans, &chain, 0); 311 bzero(&chain->data->bmdata[0], 312 HAMMER2_FREEMAP_LEVELN_PSIZE); 313 chain->bref.check.freemap.bigmask = (uint32_t)-1; 314 chain->bref.check.freemap.avail = l1size; 315 /* bref.methods should already be inherited */ 316 317 hammer2_freemap_init(trans, hmp, key, chain); 318 } 319 } else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) { 320 /* 321 * Already flagged as not having enough space 322 */ 323 error = ENOSPC; 324 } else { 325 /* 326 * Modify existing chain to setup for adjustment. 327 */ 328 hammer2_chain_modify(trans, &chain, 0); 329 } 330 331 /* 332 * Scan 2MB entries. 
333 */ 334 if (error == 0) { 335 hammer2_bmap_data_t *bmap; 336 hammer2_key_t base_key; 337 int count; 338 int start; 339 int n; 340 341 start = (int)((*bnextp - key) >> HAMMER2_FREEMAP_LEVEL0_RADIX); 342 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT); 343 hammer2_chain_modify(trans, &chain, 0); 344 345 error = ENOSPC; 346 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 347 if (start + count >= HAMMER2_FREEMAP_COUNT && 348 start - count < 0) { 349 break; 350 } 351 n = start + count; 352 bmap = &chain->data->bmdata[n]; 353 if (n < HAMMER2_FREEMAP_COUNT && bmap->avail && 354 (bmap->radix == 0 || bmap->radix == radix)) { 355 base_key = key + n * l0size; 356 error = hammer2_bmap_alloc(trans, hmp, bmap, n, 357 radix, &base_key); 358 if (error != ENOSPC) { 359 key = base_key; 360 break; 361 } 362 } 363 n = start - count; 364 bmap = &chain->data->bmdata[n]; 365 if (n >= 0 && bmap->avail && 366 (bmap->radix == 0 || bmap->radix == radix)) { 367 base_key = key + n * l0size; 368 error = hammer2_bmap_alloc(trans, hmp, bmap, n, 369 radix, &base_key); 370 if (error != ENOSPC) { 371 key = base_key; 372 break; 373 } 374 } 375 } 376 if (error == ENOSPC) 377 chain->bref.check.freemap.bigmask &= ~(1 << radix); 378 /* XXX also scan down from original count */ 379 } 380 381 if (error == 0) { 382 /* 383 * Assert validity. Must be beyond the static allocator used 384 * by newfs_hammer2 (and thus also beyond the aux area), 385 * not go past the volume size, and must not be in the 386 * reserved segment area for a zone. 
387 */ 388 KKASSERT(key >= hmp->voldata.allocator_beg && 389 key + bytes <= hmp->voldata.volu_size); 390 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG); 391 bref->data_off = key | radix; 392 393 #if 0 394 kprintf("alloc cp=%p %016jx %016jx using %016jx\n", 395 chain, 396 bref->key, bref->data_off, chain->bref.data_off); 397 #endif 398 } else if (error == ENOSPC) { 399 /* 400 * Return EAGAIN with next iteration in *bnextp, or 401 * return ENOSPC if the allocation map has been exhausted. 402 */ 403 error = hammer2_freemap_iterate(trans, parentp, &chain, 404 bpref, bnextp); 405 } 406 407 /* 408 * Cleanup 409 */ 410 if (chain) 411 hammer2_chain_unlock(chain); 412 return (error); 413 } 414 415 /* 416 * Allocate (1<<radix) bytes from the bmap whos base data offset is (*basep). 417 * 418 * If the linear iterator is mid-block we use it directly (the bitmap should 419 * already be marked allocated), otherwise we search for a block in the bitmap 420 * that fits the allocation request. 421 * 422 * A partial bitmap allocation sets the minimum bitmap granularity (16KB) 423 * to fully allocated and adjusts the linear allocator to allow the 424 * remaining space to be allocated. 425 */ 426 static 427 int 428 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, 429 hammer2_bmap_data_t *bmap, 430 int n, int radix, hammer2_key_t *basep) 431 { 432 struct buf *bp; 433 size_t size; 434 size_t bmsize; 435 int bmradix; 436 uint32_t bmmask; 437 int offset; 438 int i; 439 int j; 440 441 /* 442 * Take into account 2-bits per block when calculating bmradix. 443 */ 444 size = (size_t)1 << radix; 445 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) { 446 bmradix = 2; 447 bmsize = HAMMER2_FREEMAP_BLOCK_SIZE; 448 /* (16K) 2 bits per allocation block */ 449 } else { 450 bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX); 451 bmsize = size; 452 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */ 453 } 454 455 /* 456 * Use iterator or scan bitmap. 
Beware of hardware artifacts 457 * when bmradix == 32 (intermediate result can wind up being '1' 458 * instead of '0' if hardware masks bit-count & 31). 459 * 460 * NOTE: j needs to be even in the j= calculation. As an artifact 461 * of the /2 division, our bitmask has to clear bit 0. 462 */ 463 if (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) { 464 KKASSERT(bmap->linear >= 0 && 465 bmap->linear + size <= HAMMER2_SEGSIZE); 466 offset = bmap->linear; 467 i = offset / (HAMMER2_SEGSIZE / 8); 468 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30; 469 bmmask = (bmradix == 32) ? 470 0xFFFFFFFFU : (1 << bmradix) - 1; 471 bmmask <<= j; 472 #if 0 473 kprintf("alloc1 %016jx/%zd %08x i=%d j=%d bmmask=%08x " 474 "(linear=%08x)\n", 475 *basep + offset, size, 476 offset, i, j, bmmask, bmap->linear); 477 #endif 478 } else { 479 for (i = 0; i < 8; ++i) { 480 bmmask = (bmradix == 32) ? 481 0xFFFFFFFFU : (1 << bmradix) - 1; 482 for (j = 0; j < 32; j += bmradix) { 483 if ((bmap->bitmap[i] & bmmask) == 0) 484 goto success; 485 bmmask <<= bmradix; 486 } 487 } 488 KKASSERT(bmap->avail == 0); 489 return (ENOSPC); 490 success: 491 offset = i * (HAMMER2_SEGSIZE / 8) + 492 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2)); 493 #if 0 494 kprintf("alloc2 %016jx/%zd %08x i=%d j=%d bmmask=%08x\n", 495 *basep + offset, size, 496 offset, i, j, bmmask); 497 #endif 498 } 499 500 KKASSERT(i >= 0 && i < 8); /* 8 x 16 -> 128 x 16K -> 2MB */ 501 502 /* 503 * Optimize the buffer cache to avoid unnecessary read-before-write 504 * operations. 505 */ 506 if (radix < HAMMER2_MINIORADIX && 507 (bmap->bitmap[i] & bmmask) == 0) { 508 bp = getblk(hmp->devvp, *basep + offset, 509 HAMMER2_MINIOSIZE, GETBLK_NOWAIT, 0); 510 if (bp) { 511 if ((bp->b_flags & B_CACHE) == 0) 512 vfs_bio_clrbuf(bp); 513 bqrelse(bp); 514 } 515 } 516 517 #if 0 518 /* 519 * When initializing a new inode segment also attempt to initialize 520 * an adjacent segment. Be careful not to index beyond the array 521 * bounds. 
522 * 523 * We do this to try to localize inode accesses to improve 524 * directory scan rates. XXX doesn't improve scan rates. 525 */ 526 if (size == HAMMER2_INODE_BYTES) { 527 if (n & 1) { 528 if (bmap[-1].radix == 0 && bmap[-1].avail) 529 bmap[-1].radix = radix; 530 } else { 531 if (bmap[1].radix == 0 && bmap[1].avail) 532 bmap[1].radix = radix; 533 } 534 } 535 #endif 536 537 /* 538 * Adjust the linear iterator, set the radix if necessary (might as 539 * well just set it unconditionally), adjust *basep to return the 540 * allocated data offset. 541 */ 542 bmap->bitmap[i] |= bmmask; 543 bmap->linear = offset + size; 544 bmap->radix = radix; 545 bmap->avail -= size; 546 *basep += offset; 547 548 return(0); 549 } 550 551 static 552 void 553 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp, 554 hammer2_key_t key, hammer2_chain_t *chain) 555 { 556 hammer2_off_t l1size; 557 hammer2_off_t lokey; 558 hammer2_off_t hikey; 559 hammer2_bmap_data_t *bmap; 560 int count; 561 562 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 563 564 /* 565 * Calculate the portion of the 2GB map that should be initialized 566 * as free. Portions below or after will be initialized as allocated. 567 * SEGMASK-align the areas so we don't have to worry about sub-scans 568 * or endianess when using memset. 569 * 570 * (1) Ensure that all statically allocated space from newfs_hammer2 571 * is marked allocated. 572 * 573 * (2) Ensure that the reserved area is marked allocated (typically 574 * the first 4MB of the 2GB area being represented). 575 * 576 * (3) Ensure that any trailing space at the end-of-volume is marked 577 * allocated. 578 * 579 * WARNING! It is possible for lokey to be larger than hikey if the 580 * entire 2GB segment is within the static allocation. 
581 */ 582 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) & 583 ~HAMMER2_SEGMASK64; 584 585 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 586 HAMMER2_ZONE_SEG64) { 587 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 588 HAMMER2_ZONE_SEG64; 589 } 590 591 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 592 if (hikey > hmp->voldata.volu_size) { 593 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64; 594 } 595 596 chain->bref.check.freemap.avail = 597 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 598 bmap = &chain->data->bmdata[0]; 599 600 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 601 if (key < lokey || key >= hikey) { 602 memset(bmap->bitmap, -1, 603 sizeof(bmap->bitmap)); 604 bmap->avail = 0; 605 chain->bref.check.freemap.avail -= 606 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 607 } else { 608 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 609 } 610 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 611 ++bmap; 612 } 613 } 614 615 /* 616 * The current Level 1 freemap has been exhausted, iterate to the next 617 * one, return ENOSPC if no freemaps remain. 618 * 619 * XXX this should rotate back to the beginning to handle freed-up space 620 * XXX or use intermediate entries to locate free space. 
TODO 621 */ 622 static int 623 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp, 624 hammer2_chain_t **chainp, 625 hammer2_off_t bpref, hammer2_off_t *bnextp) 626 { 627 hammer2_mount_t *hmp = (*parentp)->hmp; 628 629 *bnextp &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 630 *bnextp += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 631 if (*bnextp >= hmp->voldata.volu_size) 632 return (ENOSPC); 633 return(EAGAIN); 634 } 635 636 #if 0 637 638 void 639 hammer2_freemap_free(hammer2_mount_t *hmp, hammer2_off_t data_off, int type) 640 { 641 hammer2_freecache_t *fc; 642 int radix; 643 int fctype; 644 645 switch(type) { 646 case HAMMER2_BREF_TYPE_INODE: 647 fctype = HAMMER2_FREECACHE_INODE; 648 break; 649 case HAMMER2_BREF_TYPE_INDIRECT: 650 fctype = HAMMER2_FREECACHE_INODE; 651 break; 652 case HAMMER2_BREF_TYPE_DATA: 653 fctype = HAMMER2_FREECACHE_DATA; 654 break; 655 default: 656 fctype = HAMMER2_FREECACHE_DATA; 657 break; 658 } 659 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX; 660 data_off &= ~HAMMER2_OFF_MASK_RADIX; 661 if (radix >= HAMMER2_MAX_RADIX) 662 return; 663 664 fc = &hmp->freecache[fctype][radix]; 665 if (fc->single == 0) { 666 lockmgr(&hmp->alloclk, LK_EXCLUSIVE); 667 fc->single = data_off; 668 lockmgr(&hmp->alloclk, LK_RELEASE); 669 } 670 } 671 672 #endif 673 674 #if 0 675 /* 676 * Allocate media space, returning a combined data offset and radix. 677 * Also return the related (device) buffer cache buffer. 678 */ 679 hammer2_off_t 680 hammer2_freemap_alloc_bp(hammer2_mount_t *hmp, size_t bytes, struct buf **bpp) 681 { 682 } 683 684 #endif 685