1 /* 2 * Copyright (c) 2011-2014 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/fcntl.h> 39 #include <sys/buf.h> 40 #include <sys/proc.h> 41 #include <sys/namei.h> 42 #include <sys/mount.h> 43 #include <sys/vnode.h> 44 #include <sys/mountctl.h> 45 46 #include "hammer2.h" 47 48 #define FREEMAP_DEBUG 0 49 50 struct hammer2_fiterate { 51 hammer2_off_t bpref; 52 hammer2_off_t bnext; 53 int loops; 54 }; 55 56 typedef struct hammer2_fiterate hammer2_fiterate_t; 57 58 static int hammer2_freemap_try_alloc(hammer2_chain_t **parentp, 59 hammer2_blockref_t *bref, int radix, 60 hammer2_fiterate_t *iter, hammer2_tid_t mtid); 61 static void hammer2_freemap_init(hammer2_dev_t *hmp, 62 hammer2_key_t key, hammer2_chain_t *chain); 63 static int hammer2_bmap_alloc(hammer2_dev_t *hmp, 64 hammer2_bmap_data_t *bmap, uint16_t class, 65 int n, int radix, hammer2_key_t *basep); 66 static int hammer2_freemap_iterate(hammer2_chain_t **parentp, 67 hammer2_chain_t **chainp, 68 hammer2_fiterate_t *iter); 69 70 static __inline 71 int 72 hammer2_freemapradix(int radix) 73 { 74 return(radix); 75 } 76 77 /* 78 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF 79 * bref. Return a combined media offset and physical size radix. Freemap 80 * chains use fixed storage offsets in the 4MB reserved area at the 81 * beginning of each 2GB zone 82 * 83 * Rotate between four possibilities. Theoretically this means we have three 84 * good freemaps in case of a crash which we can use as a base for the fixup 85 * scan at mount-time. 86 */ 87 #define H2FMBASE(key, radix) ((key) & ~(((hammer2_off_t)1 << (radix)) - 1)) 88 #define H2FMSHIFT(radix) ((hammer2_off_t)1 << (radix)) 89 90 static 91 int 92 hammer2_freemap_reserve(hammer2_chain_t *chain, int radix) 93 { 94 hammer2_blockref_t *bref = &chain->bref; 95 hammer2_off_t off; 96 int index; 97 int index_inc; 98 size_t bytes; 99 100 /* 101 * Physical allocation size. 102 */ 103 bytes = (size_t)1 << radix; 104 105 /* 106 * Calculate block selection index 0..7 of current block. If this 107 * is the first allocation of the block (verses a modification of an 108 * existing block), we use index 0, otherwise we use the next rotating 109 * index. 110 */ 111 if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) { 112 index = 0; 113 } else { 114 off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX & 115 (((hammer2_off_t)1 << 116 HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 117 off = off / HAMMER2_PBUFSIZE; 118 KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 && 119 off < HAMMER2_ZONE_FREEMAP_END); 120 index = (int)(off - HAMMER2_ZONE_FREEMAP_00) / 121 HAMMER2_ZONE_FREEMAP_INC; 122 KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS); 123 if (++index == HAMMER2_NFREEMAPS) 124 index = 0; 125 } 126 127 /* 128 * Calculate the block offset of the reserved block. This will 129 * point into the 4MB reserved area at the base of the appropriate 130 * 2GB zone, once added to the FREEMAP_x selection above. 131 */ 132 index_inc = index * HAMMER2_ZONE_FREEMAP_INC; 133 134 switch(bref->keybits) { 135 /* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */ 136 case HAMMER2_FREEMAP_LEVEL5_RADIX: /* 2EB */ 137 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 138 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 139 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) + 140 (index_inc + HAMMER2_ZONE_FREEMAP_00 + 141 HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE; 142 break; 143 case HAMMER2_FREEMAP_LEVEL4_RADIX: /* 2EB */ 144 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 145 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 146 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) + 147 (index_inc + HAMMER2_ZONE_FREEMAP_00 + 148 HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE; 149 break; 150 case HAMMER2_FREEMAP_LEVEL3_RADIX: /* 2PB */ 151 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 152 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 153 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) + 154 (index_inc + HAMMER2_ZONE_FREEMAP_00 + 155 HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE; 156 break; 157 case HAMMER2_FREEMAP_LEVEL2_RADIX: /* 2TB */ 158 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE); 159 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 160 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) + 161 (index_inc + HAMMER2_ZONE_FREEMAP_00 + 162 HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE; 163 break; 164 case HAMMER2_FREEMAP_LEVEL1_RADIX: /* 2GB */ 165 KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF); 166 KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE); 167 off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 168 (index_inc + HAMMER2_ZONE_FREEMAP_00 + 169 HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE; 170 break; 171 default: 172 panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits); 173 /* NOT REACHED */ 174 off = (hammer2_off_t)-1; 175 break; 176 } 177 bref->data_off = off | radix; 178 #if FREEMAP_DEBUG 179 kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n", 180 bref->type, bref->key, bref->keybits, bref->data_off); 181 #endif 182 return (0); 183 } 184 185 /* 186 * Normal freemap allocator 187 * 188 * Use available hints to allocate space using the freemap. Create missing 189 * freemap infrastructure on-the-fly as needed (including marking initial 190 * allocations using the iterator as allocated, instantiating new 2GB zones, 191 * and dealing with the end-of-media edge case). 192 * 193 * ip and bpref are only used as a heuristic to determine locality of 194 * reference. bref->key may also be used heuristically. 195 */ 196 int 197 hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes) 198 { 199 hammer2_dev_t *hmp = chain->hmp; 200 hammer2_blockref_t *bref = &chain->bref; 201 hammer2_chain_t *parent; 202 hammer2_tid_t mtid; 203 int radix; 204 int error; 205 unsigned int hindex; 206 hammer2_fiterate_t iter; 207 208 mtid = hammer2_trans_sub(hmp->spmp); 209 210 /* 211 * Validate the allocation size. It must be a power of 2. 212 * 213 * For now require that the caller be aware of the minimum 214 * allocation (1K). 215 */ 216 radix = hammer2_getradix(bytes); 217 KKASSERT((size_t)1 << radix == bytes); 218 219 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE || 220 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { 221 /* 222 * Freemap blocks themselves are assigned from the reserve 223 * area, not allocated from the freemap. 224 */ 225 error = hammer2_freemap_reserve(chain, radix); 226 KKASSERT(error == 0); 227 228 return error; 229 } 230 231 KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX); 232 233 /* 234 * Calculate the starting point for our allocation search. 235 * 236 * Each freemap leaf is dedicated to a specific freemap_radix. 237 * The freemap_radix can be more fine-grained than the device buffer 238 * radix which results in inodes being grouped together in their 239 * own segment, terminal-data (16K or less) and initial indirect 240 * block being grouped together, and then full-indirect and full-data 241 * blocks (64K) being grouped together. 242 * 243 * The single most important aspect of this is the inode grouping 244 * because that is what allows 'find' and 'ls' and other filesystem 245 * topology operations to run fast. 246 */ 247 #if 0 248 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) 249 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX; 250 else if (trans->tmp_bpref) 251 bpref = trans->tmp_bpref; 252 else if (trans->tmp_ip) 253 bpref = trans->tmp_ip->chain->bref.data_off; 254 else 255 #endif 256 /* 257 * Heuristic tracking index. We would like one for each distinct 258 * bref type if possible. heur_freemap[] has room for two classes 259 * for each type. At a minimum we have to break-up our heuristic 260 * by device block sizes. 261 */ 262 hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX; 263 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX); 264 hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX; 265 hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1; 266 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_SIZE); 267 268 iter.bpref = hmp->heur_freemap[hindex]; 269 270 /* 271 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's 272 * reserved area, the try code will iterate past it. 273 */ 274 if (iter.bpref > hmp->voldata.volu_size) 275 iter.bpref = hmp->voldata.volu_size - 1; 276 277 /* 278 * Iterate the freemap looking for free space before and after. 279 */ 280 parent = &hmp->fchain; 281 hammer2_chain_ref(parent); 282 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 283 error = EAGAIN; 284 iter.bnext = iter.bpref; 285 iter.loops = 0; 286 287 while (error == EAGAIN) { 288 error = hammer2_freemap_try_alloc(&parent, bref, radix, 289 &iter, mtid); 290 } 291 hmp->heur_freemap[hindex] = iter.bnext; 292 hammer2_chain_unlock(parent); 293 hammer2_chain_drop(parent); 294 295 KKASSERT(error == 0); 296 297 return (error); 298 } 299 300 static int 301 hammer2_freemap_try_alloc(hammer2_chain_t **parentp, 302 hammer2_blockref_t *bref, int radix, 303 hammer2_fiterate_t *iter, hammer2_tid_t mtid) 304 { 305 hammer2_dev_t *hmp = (*parentp)->hmp; 306 hammer2_off_t l0size; 307 hammer2_off_t l1size; 308 hammer2_off_t l1mask; 309 hammer2_key_t key_dummy; 310 hammer2_chain_t *chain; 311 hammer2_off_t key; 312 size_t bytes; 313 uint16_t class; 314 int error = 0; 315 int cache_index = -1; 316 317 /* 318 * Calculate the number of bytes being allocated, the number 319 * of contiguous bits of bitmap being allocated, and the bitmap 320 * mask. 321 * 322 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the 323 * mask calculation. 324 */ 325 bytes = (size_t)1 << radix; 326 class = (bref->type << 8) | hammer2_devblkradix(radix); 327 328 /* 329 * Lookup the level1 freemap chain, creating and initializing one 330 * if necessary. Intermediate levels will be created automatically 331 * when necessary by hammer2_chain_create(). 332 */ 333 key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX); 334 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 335 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 336 l1mask = l1size - 1; 337 338 chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask, 339 &cache_index, 340 HAMMER2_LOOKUP_ALWAYS | 341 HAMMER2_LOOKUP_MATCHIND); 342 343 if (chain == NULL) { 344 /* 345 * Create the missing leaf, be sure to initialize 346 * the auxillary freemap tracking information in 347 * the bref.check.freemap structure. 348 */ 349 #if 0 350 kprintf("freemap create L1 @ %016jx bpref %016jx\n", 351 key, iter->bpref); 352 #endif 353 error = hammer2_chain_create(parentp, &chain, 354 hmp->spmp, HAMMER2_METH_DEFAULT, 355 key, HAMMER2_FREEMAP_LEVEL1_RADIX, 356 HAMMER2_BREF_TYPE_FREEMAP_LEAF, 357 HAMMER2_FREEMAP_LEVELN_PSIZE, 358 mtid, 0, 0); 359 KKASSERT(error == 0); 360 if (error == 0) { 361 hammer2_chain_modify(chain, mtid, 0, 0); 362 bzero(&chain->data->bmdata[0], 363 HAMMER2_FREEMAP_LEVELN_PSIZE); 364 chain->bref.check.freemap.bigmask = (uint32_t)-1; 365 chain->bref.check.freemap.avail = l1size; 366 /* bref.methods should already be inherited */ 367 368 hammer2_freemap_init(hmp, key, chain); 369 } 370 } else if (chain->error) { 371 /* 372 * Error during lookup. 373 */ 374 kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n", 375 (intmax_t)bref->data_off, 376 hammer2_error_str(chain->error)); 377 error = EIO; 378 } else if ((chain->bref.check.freemap.bigmask & 379 ((size_t)1 << radix)) == 0) { 380 /* 381 * Already flagged as not having enough space 382 */ 383 error = ENOSPC; 384 } else { 385 /* 386 * Modify existing chain to setup for adjustment. 387 */ 388 hammer2_chain_modify(chain, mtid, 0, 0); 389 } 390 391 /* 392 * Scan 2MB entries. 393 */ 394 if (error == 0) { 395 hammer2_bmap_data_t *bmap; 396 hammer2_key_t base_key; 397 int count; 398 int start; 399 int n; 400 401 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF); 402 start = (int)((iter->bnext - key) >> 403 HAMMER2_FREEMAP_LEVEL0_RADIX); 404 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT); 405 hammer2_chain_modify(chain, mtid, 0, 0); 406 407 error = ENOSPC; 408 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 409 int availchk; 410 411 if (start + count >= HAMMER2_FREEMAP_COUNT && 412 start - count < 0) { 413 break; 414 } 415 416 /* 417 * Calculate bmap pointer 418 * 419 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT. 420 */ 421 n = start + count; 422 bmap = &chain->data->bmdata[n]; 423 424 if (n >= HAMMER2_FREEMAP_COUNT) { 425 availchk = 0; 426 } else if (bmap->avail) { 427 availchk = 1; 428 } else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX && 429 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) { 430 availchk = 1; 431 } else { 432 availchk = 0; 433 } 434 435 if (availchk && 436 (bmap->class == 0 || bmap->class == class)) { 437 base_key = key + n * l0size; 438 error = hammer2_bmap_alloc(hmp, bmap, 439 class, n, radix, 440 &base_key); 441 if (error != ENOSPC) { 442 key = base_key; 443 break; 444 } 445 } 446 447 /* 448 * Must recalculate after potentially having called 449 * hammer2_bmap_alloc() above in case chain was 450 * reallocated. 451 * 452 * NOTE: bmap pointer is invalid if n < 0. 453 */ 454 n = start - count; 455 bmap = &chain->data->bmdata[n]; 456 if (n < 0) { 457 availchk = 0; 458 } else if (bmap->avail) { 459 availchk = 1; 460 } else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX && 461 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) { 462 availchk = 1; 463 } else { 464 availchk = 0; 465 } 466 467 if (availchk && 468 (bmap->class == 0 || bmap->class == class)) { 469 base_key = key + n * l0size; 470 error = hammer2_bmap_alloc(hmp, bmap, 471 class, n, radix, 472 &base_key); 473 if (error != ENOSPC) { 474 key = base_key; 475 break; 476 } 477 } 478 } 479 if (error == ENOSPC) { 480 chain->bref.check.freemap.bigmask &= 481 (uint32_t)~((size_t)1 << radix); 482 } 483 /* XXX also scan down from original count */ 484 } 485 486 if (error == 0) { 487 /* 488 * Assert validity. Must be beyond the static allocator used 489 * by newfs_hammer2 (and thus also beyond the aux area), 490 * not go past the volume size, and must not be in the 491 * reserved segment area for a zone. 492 */ 493 KKASSERT(key >= hmp->voldata.allocator_beg && 494 key + bytes <= hmp->voldata.volu_size); 495 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG); 496 bref->data_off = key | radix; 497 #if 0 498 kprintf("alloc cp=%p %016jx %016jx using %016jx\n", 499 chain, 500 bref->key, bref->data_off, chain->bref.data_off); 501 #endif 502 } else if (error == ENOSPC) { 503 /* 504 * Return EAGAIN with next iteration in iter->bnext, or 505 * return ENOSPC if the allocation map has been exhausted. 506 */ 507 error = hammer2_freemap_iterate(parentp, &chain, iter); 508 } 509 510 /* 511 * Cleanup 512 */ 513 if (chain) { 514 hammer2_chain_unlock(chain); 515 hammer2_chain_drop(chain); 516 } 517 return (error); 518 } 519 520 /* 521 * Allocate (1<<radix) bytes from the bmap whos base data offset is (*basep). 522 * 523 * If the linear iterator is mid-block we use it directly (the bitmap should 524 * already be marked allocated), otherwise we search for a block in the bitmap 525 * that fits the allocation request. 526 * 527 * A partial bitmap allocation sets the minimum bitmap granularity (16KB) 528 * to fully allocated and adjusts the linear allocator to allow the 529 * remaining space to be allocated. 530 */ 531 static 532 int 533 hammer2_bmap_alloc(hammer2_dev_t *hmp, hammer2_bmap_data_t *bmap, 534 uint16_t class, int n, int radix, hammer2_key_t *basep) 535 { 536 size_t size; 537 size_t bgsize; 538 int bmradix; 539 hammer2_bitmap_t bmmask; 540 int offset; 541 int i; 542 int j; 543 544 /* 545 * Take into account 2-bits per block when calculating bmradix. 546 */ 547 size = (size_t)1 << radix; 548 549 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) { 550 bmradix = 2; 551 /* (16K) 2 bits per allocation block */ 552 } else { 553 bmradix = (hammer2_bitmap_t)2 << 554 (radix - HAMMER2_FREEMAP_BLOCK_RADIX); 555 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */ 556 } 557 558 /* 559 * Use the linear iterator to pack small allocations, otherwise 560 * fall-back to finding a free 16KB chunk. The linear iterator 561 * is only valid when *NOT* on a freemap chunking boundary (16KB). 562 * If it is the bitmap must be scanned. It can become invalid 563 * once we pack to the boundary. We adjust it after a bitmap 564 * allocation only for sub-16KB allocations (so the perfectly good 565 * previous value can still be used for fragments when 16KB+ 566 * allocations are made). 567 * 568 * Beware of hardware artifacts when bmradix == 64 (intermediate 569 * result can wind up being '1' instead of '0' if hardware masks 570 * bit-count & 31). 571 * 572 * NOTE: j needs to be even in the j= calculation. As an artifact 573 * of the /2 division, our bitmask has to clear bit 0. 574 * 575 * NOTE: TODO this can leave little unallocatable fragments lying 576 * around. 577 */ 578 if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <= 579 HAMMER2_FREEMAP_BLOCK_SIZE && 580 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) && 581 bmap->linear < HAMMER2_SEGSIZE) { 582 KKASSERT(bmap->linear >= 0 && 583 bmap->linear + size <= HAMMER2_SEGSIZE && 584 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0); 585 offset = bmap->linear; 586 i = offset / (HAMMER2_SEGSIZE / 8); 587 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30; 588 bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ? 589 HAMMER2_BMAP_ALLONES : 590 ((hammer2_bitmap_t)1 << bmradix) - 1; 591 bmmask <<= j; 592 bmap->linear = offset + size; 593 } else { 594 for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) { 595 bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ? 596 HAMMER2_BMAP_ALLONES : 597 ((hammer2_bitmap_t)1 << bmradix) - 1; 598 for (j = 0; 599 j < HAMMER2_BMAP_BITS_PER_ELEMENT; 600 j += bmradix) { 601 if ((bmap->bitmapq[i] & bmmask) == 0) 602 goto success; 603 bmmask <<= bmradix; 604 } 605 } 606 /*fragments might remain*/ 607 /*KKASSERT(bmap->avail == 0);*/ 608 return (ENOSPC); 609 success: 610 offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) + 611 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2)); 612 if (size & HAMMER2_FREEMAP_BLOCK_MASK) 613 bmap->linear = offset + size; 614 } 615 616 /* 8 x (64/2) -> 256 x 16K -> 4MB */ 617 KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS); 618 619 /* 620 * Optimize the buffer cache to avoid unnecessary read-before-write 621 * operations. 622 * 623 * The device block size could be larger than the allocation size 624 * so the actual bitmap test is somewhat more involved. We have 625 * to use a compatible buffer size for this operation. 626 */ 627 if ((bmap->bitmapq[i] & bmmask) == 0 && 628 hammer2_devblksize(size) != size) { 629 size_t psize = hammer2_devblksize(size); 630 hammer2_off_t pmask = (hammer2_off_t)psize - 1; 631 int pbmradix = (hammer2_bitmap_t)2 << 632 (hammer2_devblkradix(radix) - 633 HAMMER2_FREEMAP_BLOCK_RADIX); 634 hammer2_bitmap_t pbmmask; 635 int pradix = hammer2_getradix(psize); 636 637 pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ? 638 HAMMER2_BMAP_ALLONES : 639 ((hammer2_bitmap_t)1 << pbmradix) - 1; 640 while ((pbmmask & bmmask) == 0) 641 pbmmask <<= pbmradix; 642 643 #if 0 644 kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n", 645 *basep + offset, bmap->bitmapq[i], 646 pbmmask, bmmask, size, psize); 647 #endif 648 649 if ((bmap->bitmapq[i] & pbmmask) == 0) { 650 hammer2_io_newq(hmp, HAMMER2_BREF_TYPE_FREEMAP_LEAF, 651 (*basep + (offset & ~pmask)) | 652 pradix, psize); 653 } 654 } 655 656 #if 0 657 /* 658 * When initializing a new inode segment also attempt to initialize 659 * an adjacent segment. Be careful not to index beyond the array 660 * bounds. 661 * 662 * We do this to try to localize inode accesses to improve 663 * directory scan rates. XXX doesn't improve scan rates. 664 */ 665 if (size == HAMMER2_INODE_BYTES) { 666 if (n & 1) { 667 if (bmap[-1].radix == 0 && bmap[-1].avail) 668 bmap[-1].radix = radix; 669 } else { 670 if (bmap[1].radix == 0 && bmap[1].avail) 671 bmap[1].radix = radix; 672 } 673 } 674 #endif 675 /* 676 * Calculate the bitmap-granular change in bgsize for the volume 677 * header. We cannot use the fine-grained change here because 678 * the bulkfree code can't undo it. If the bitmap element is already 679 * marked allocated it has already been accounted for. 680 */ 681 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) { 682 if (bmap->bitmapq[i] & bmmask) 683 bgsize = 0; 684 else 685 bgsize = HAMMER2_FREEMAP_BLOCK_SIZE; 686 } else { 687 bgsize = size; 688 } 689 690 /* 691 * Adjust the bitmap, set the class (it might have been 0), 692 * and available bytes, update the allocation offset (*basep) 693 * from the L0 base to the actual offset. 694 * 695 * avail must reflect the bitmap-granular availability. The allocator 696 * tests will also check the linear iterator. 697 */ 698 bmap->bitmapq[i] |= bmmask; 699 bmap->class = class; 700 bmap->avail -= bgsize; 701 *basep += offset; 702 703 /* 704 * Adjust the volume header's allocator_free parameter. This 705 * parameter has to be fixed up by bulkfree which has no way to 706 * figure out sub-16K chunking, so it must be adjusted by the 707 * bitmap-granular size. 708 */ 709 if (bgsize) { 710 hammer2_voldata_lock(hmp); 711 hammer2_voldata_modify(hmp); 712 hmp->voldata.allocator_free -= bgsize; 713 hammer2_voldata_unlock(hmp); 714 } 715 716 return(0); 717 } 718 719 static 720 void 721 hammer2_freemap_init(hammer2_dev_t *hmp, hammer2_key_t key, 722 hammer2_chain_t *chain) 723 { 724 hammer2_off_t l1size; 725 hammer2_off_t lokey; 726 hammer2_off_t hikey; 727 hammer2_bmap_data_t *bmap; 728 int count; 729 730 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 731 732 /* 733 * Calculate the portion of the 2GB map that should be initialized 734 * as free. Portions below or after will be initialized as allocated. 735 * SEGMASK-align the areas so we don't have to worry about sub-scans 736 * or endianess when using memset. 737 * 738 * (1) Ensure that all statically allocated space from newfs_hammer2 739 * is marked allocated. 740 * 741 * (2) Ensure that the reserved area is marked allocated (typically 742 * the first 4MB of the 2GB area being represented). 743 * 744 * (3) Ensure that any trailing space at the end-of-volume is marked 745 * allocated. 746 * 747 * WARNING! It is possible for lokey to be larger than hikey if the 748 * entire 2GB segment is within the static allocation. 749 */ 750 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) & 751 ~HAMMER2_SEGMASK64; 752 753 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 754 HAMMER2_ZONE_SEG64) { 755 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 756 HAMMER2_ZONE_SEG64; 757 } 758 759 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 760 if (hikey > hmp->voldata.volu_size) { 761 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64; 762 } 763 764 chain->bref.check.freemap.avail = 765 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 766 bmap = &chain->data->bmdata[0]; 767 768 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 769 if (key < lokey || key >= hikey) { 770 memset(bmap->bitmapq, -1, 771 sizeof(bmap->bitmapq)); 772 bmap->avail = 0; 773 bmap->linear = HAMMER2_SEGSIZE; 774 chain->bref.check.freemap.avail -= 775 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 776 } else { 777 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 778 } 779 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 780 ++bmap; 781 } 782 } 783 784 /* 785 * The current Level 1 freemap has been exhausted, iterate to the next 786 * one, return ENOSPC if no freemaps remain. 787 * 788 * XXX this should rotate back to the beginning to handle freed-up space 789 * XXX or use intermediate entries to locate free space. TODO 790 */ 791 static int 792 hammer2_freemap_iterate(hammer2_chain_t **parentp, hammer2_chain_t **chainp, 793 hammer2_fiterate_t *iter) 794 { 795 hammer2_dev_t *hmp = (*parentp)->hmp; 796 797 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 798 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 799 if (iter->bnext >= hmp->voldata.volu_size) { 800 iter->bnext = 0; 801 if (++iter->loops == 2) 802 return (ENOSPC); 803 } 804 return(EAGAIN); 805 } 806 807 /* 808 * Adjust the bit-pattern for data in the freemap bitmap according to 809 * (how). This code is called from on-mount recovery to fixup (mark 810 * as allocated) blocks whos freemap upates might not have been committed 811 * in the last crash and is used by the bulk freemap scan to stage frees. 812 * 813 * XXX currently disabled when how == 0 (the normal real-time case). At 814 * the moment we depend on the bulk freescan to actually free blocks. It 815 * will still call this routine with a non-zero how to stage possible frees 816 * and to do the actual free. 817 */ 818 void 819 hammer2_freemap_adjust(hammer2_dev_t *hmp, hammer2_blockref_t *bref, 820 int how) 821 { 822 hammer2_off_t data_off = bref->data_off; 823 hammer2_chain_t *chain; 824 hammer2_chain_t *parent; 825 hammer2_bmap_data_t *bmap; 826 hammer2_key_t key; 827 hammer2_key_t key_dummy; 828 hammer2_off_t l0size; 829 hammer2_off_t l1size; 830 hammer2_off_t l1mask; 831 hammer2_tid_t mtid; 832 hammer2_bitmap_t *bitmap; 833 const hammer2_bitmap_t bmmask00 = 0; 834 hammer2_bitmap_t bmmask01; 835 hammer2_bitmap_t bmmask10; 836 hammer2_bitmap_t bmmask11; 837 size_t bytes; 838 uint16_t class; 839 int radix; 840 int start; 841 int count; 842 int modified = 0; 843 int cache_index = -1; 844 int error; 845 size_t bgsize = 0; 846 847 KKASSERT(how == HAMMER2_FREEMAP_DORECOVER); 848 849 mtid = hammer2_trans_sub(hmp->spmp); 850 851 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX; 852 data_off &= ~HAMMER2_OFF_MASK_RADIX; 853 KKASSERT(radix <= HAMMER2_RADIX_MAX); 854 855 bytes = (size_t)1 << radix; 856 class = (bref->type << 8) | hammer2_devblkradix(radix); 857 858 /* 859 * We can't adjust thre freemap for data allocations made by 860 * newfs_hammer2. 861 */ 862 if (data_off < hmp->voldata.allocator_beg) 863 return; 864 865 KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG); 866 867 /* 868 * Lookup the level1 freemap chain. The chain must exist. 869 */ 870 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX); 871 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 872 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 873 l1mask = l1size - 1; 874 875 parent = &hmp->fchain; 876 hammer2_chain_ref(parent); 877 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 878 879 chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask, 880 &cache_index, 881 HAMMER2_LOOKUP_ALWAYS | 882 HAMMER2_LOOKUP_MATCHIND); 883 884 /* 885 * Stop early if we are trying to free something but no leaf exists. 886 */ 887 if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) { 888 kprintf("hammer2_freemap_adjust: %016jx: no chain\n", 889 (intmax_t)bref->data_off); 890 goto done; 891 } 892 if (chain->error) { 893 kprintf("hammer2_freemap_adjust: %016jx: error %s\n", 894 (intmax_t)bref->data_off, 895 hammer2_error_str(chain->error)); 896 hammer2_chain_unlock(chain); 897 hammer2_chain_drop(chain); 898 chain = NULL; 899 goto done; 900 } 901 902 /* 903 * Create any missing leaf(s) if we are doing a recovery (marking 904 * the block(s) as being allocated instead of being freed). Be sure 905 * to initialize the auxillary freemap tracking info in the 906 * bref.check.freemap structure. 907 */ 908 if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) { 909 error = hammer2_chain_create(&parent, &chain, 910 hmp->spmp, HAMMER2_METH_DEFAULT, 911 key, HAMMER2_FREEMAP_LEVEL1_RADIX, 912 HAMMER2_BREF_TYPE_FREEMAP_LEAF, 913 HAMMER2_FREEMAP_LEVELN_PSIZE, 914 mtid, 0, 0); 915 916 if (hammer2_debug & 0x0040) { 917 kprintf("fixup create chain %p %016jx:%d\n", 918 chain, chain->bref.key, chain->bref.keybits); 919 } 920 921 if (error == 0) { 922 hammer2_chain_modify(chain, mtid, 0, 0); 923 bzero(&chain->data->bmdata[0], 924 HAMMER2_FREEMAP_LEVELN_PSIZE); 925 chain->bref.check.freemap.bigmask = (uint32_t)-1; 926 chain->bref.check.freemap.avail = l1size; 927 /* bref.methods should already be inherited */ 928 929 hammer2_freemap_init(hmp, key, chain); 930 } 931 /* XXX handle error */ 932 } 933 934 #if FREEMAP_DEBUG 935 kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n", 936 chain->bref.type, chain->bref.key, 937 chain->bref.keybits, chain->bref.data_off); 938 #endif 939 940 /* 941 * Calculate the bitmask (runs in 2-bit pairs). 942 */ 943 start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2; 944 bmmask01 = (hammer2_bitmap_t)1 << start; 945 bmmask10 = (hammer2_bitmap_t)2 << start; 946 bmmask11 = (hammer2_bitmap_t)3 << start; 947 948 /* 949 * Fixup the bitmap. Partial blocks cannot be fully freed unless 950 * a bulk scan is able to roll them up. 951 */ 952 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) { 953 count = 1; 954 if (how == HAMMER2_FREEMAP_DOREALFREE) 955 how = HAMMER2_FREEMAP_DOMAYFREE; 956 } else { 957 count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX); 958 } 959 960 /* 961 * [re]load the bmap and bitmap pointers. Each bmap entry covers 962 * a 2MB swath. The bmap itself (LEVEL1) covers 2GB. 963 * 964 * Be sure to reset the linear iterator to ensure that the adjustment 965 * is not ignored. 966 */ 967 again: 968 bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) & 969 (HAMMER2_FREEMAP_COUNT - 1)]; 970 bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7]; 971 972 if (modified) 973 bmap->linear = 0; 974 975 while (count) { 976 KKASSERT(bmmask11); 977 if (how == HAMMER2_FREEMAP_DORECOVER) { 978 /* 979 * Recovery request, mark as allocated. 980 */ 981 if ((*bitmap & bmmask11) != bmmask11) { 982 if (modified == 0) { 983 hammer2_chain_modify(chain, mtid, 0, 0); 984 modified = 1; 985 goto again; 986 } 987 if ((*bitmap & bmmask11) == bmmask00) { 988 bmap->avail -= 989 HAMMER2_FREEMAP_BLOCK_SIZE; 990 bgsize += HAMMER2_FREEMAP_BLOCK_SIZE; 991 } 992 if (bmap->class == 0) 993 bmap->class = class; 994 *bitmap |= bmmask11; 995 if (hammer2_debug & 0x0040) { 996 kprintf("hammer2_freemap_recover: " 997 "fixup type=%02x " 998 "block=%016jx/%zd\n", 999 bref->type, data_off, bytes); 1000 } 1001 } else { 1002 /* 1003 kprintf("hammer2_freemap_recover: good " 1004 "type=%02x block=%016jx/%zd\n", 1005 bref->type, data_off, bytes); 1006 */ 1007 } 1008 } 1009 #if 0 1010 /* 1011 * XXX this stuff doesn't work, avail is miscalculated and 1012 * code 10 means something else now. 1013 */ 1014 else if ((*bitmap & bmmask11) == bmmask11) { 1015 /* 1016 * Mayfree/Realfree request and bitmap is currently 1017 * marked as being fully allocated. 1018 */ 1019 if (!modified) { 1020 hammer2_chain_modify(chain, 0); 1021 modified = 1; 1022 goto again; 1023 } 1024 if (how == HAMMER2_FREEMAP_DOREALFREE) 1025 *bitmap &= ~bmmask11; 1026 else 1027 *bitmap = (*bitmap & ~bmmask11) | bmmask10; 1028 } else if ((*bitmap & bmmask11) == bmmask10) { 1029 /* 1030 * Mayfree/Realfree request and bitmap is currently 1031 * marked as being possibly freeable. 1032 */ 1033 if (how == HAMMER2_FREEMAP_DOREALFREE) { 1034 if (!modified) { 1035 hammer2_chain_modify(chain, 0); 1036 modified = 1; 1037 goto again; 1038 } 1039 *bitmap &= ~bmmask11; 1040 } 1041 } else { 1042 /* 1043 * 01 - Not implemented, currently illegal state 1044 * 00 - Not allocated at all, illegal free. 1045 */ 1046 panic("hammer2_freemap_adjust: " 1047 "Illegal state %08x(%08x)", 1048 *bitmap, *bitmap & bmmask11); 1049 } 1050 #endif 1051 --count; 1052 bmmask01 <<= 2; 1053 bmmask10 <<= 2; 1054 bmmask11 <<= 2; 1055 } 1056 #if HAMMER2_BMAP_ELEMENTS != 8 1057 #error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8" 1058 #endif 1059 if (how == HAMMER2_FREEMAP_DOREALFREE && modified) { 1060 bmap->avail += 1 << radix; 1061 KKASSERT(bmap->avail <= HAMMER2_SEGSIZE); 1062 if (bmap->avail == HAMMER2_SEGSIZE && 1063 bmap->bitmapq[0] == 0 && 1064 bmap->bitmapq[1] == 0 && 1065 bmap->bitmapq[2] == 0 && 1066 bmap->bitmapq[3] == 0 && 1067 bmap->bitmapq[4] == 0 && 1068 bmap->bitmapq[5] == 0 && 1069 bmap->bitmapq[6] == 0 && 1070 bmap->bitmapq[7] == 0) { 1071 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX); 1072 kprintf("Freeseg %016jx\n", (intmax_t)key); 1073 bmap->class = 0; 1074 } 1075 } 1076 1077 /* 1078 * chain->bref.check.freemap.bigmask (XXX) 1079 * 1080 * Setting bigmask is a hint to the allocation code that there might 1081 * be something allocatable. We also set this in recovery... it 1082 * doesn't hurt and we might want to use the hint for other validation 1083 * operations later on. 1084 */ 1085 if (modified) 1086 chain->bref.check.freemap.bigmask |= 1 << radix; 1087 1088 hammer2_chain_unlock(chain); 1089 hammer2_chain_drop(chain); 1090 done: 1091 hammer2_chain_unlock(parent); 1092 hammer2_chain_drop(parent); 1093 1094 if (bgsize) { 1095 hammer2_voldata_lock(hmp); 1096 hammer2_voldata_modify(hmp); 1097 hmp->voldata.allocator_free -= bgsize; 1098 hammer2_voldata_unlock(hmp); 1099 } 1100 } 1101 1102 /* 1103 * Validate the freemap, in three stages. 1104 * 1105 * stage-1 ALLOCATED -> POSSIBLY FREE 1106 * POSSIBLY FREE -> POSSIBLY FREE (type corrected) 1107 * 1108 * This transitions bitmap entries from ALLOCATED to POSSIBLY FREE. 1109 * The POSSIBLY FREE state does not mean that a block is actually free 1110 * and may be transitioned back to ALLOCATED in stage-2. 1111 * 1112 * This is typically done during normal filesystem operations when 1113 * something is deleted or a block is replaced. 1114 * 1115 * This is done by bulkfree in-bulk after a memory-bounded meta-data 1116 * scan to try to determine what might be freeable. 1117 * 1118 * This can be done unconditionally through a freemap scan when the 1119 * intention is to brute-force recover the proper state of the freemap. 1120 * 1121 * stage-2 POSSIBLY FREE -> ALLOCATED (scan metadata topology) 1122 * 1123 * This is done by bulkfree during a meta-data scan to ensure that 1124 * all blocks still actually allocated by the filesystem are marked 1125 * as such. 1126 * 1127 * NOTE! Live filesystem transitions to POSSIBLY FREE can occur while 1128 * the bulkfree stage-2 and stage-3 is running. The live filesystem 1129 * will use the alternative POSSIBLY FREE type (2) to prevent 1130 * stage-3 from improperly transitioning unvetted possibly-free 1131 * blocks to FREE. 1132 * 1133 * stage-3 POSSIBLY FREE (type 1) -> FREE (scan freemap) 1134 * 1135 * This is done by bulkfree to finalize POSSIBLY FREE states. 1136 * 1137 */ 1138