1 /* 2 * Copyright (c) 2011-2014 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include "hammer2.h"

/* Set to 1 to kprintf() freemap block placement decisions */
#define FREEMAP_DEBUG	0

/*
 * Iteration state for the freemap allocation scan.
 */
struct hammer2_fiterate {
	hammer2_off_t	bpref;		/* heuristic preferred start offset */
	hammer2_off_t	bnext;		/* current scan offset */
	int		loops;		/* wrap count, 2 == map exhausted */
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			hammer2_blockref_t *bref, int radix,
			hammer2_fiterate_t *iter, hammer2_tid_t mtid);
static void hammer2_freemap_init(hammer2_dev_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_dev_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);

/*
 * Map an allocation radix to its freemap radix.  Currently an identity
 * mapping.
 */
static __inline
int
hammer2_freemapradix(int radix)
{
	return(radix);
}

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
 */
#define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))

static
int
hammer2_freemap_reserve(hammer2_chain_t *chain, int radix)
{
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_off_t off;
	int index;
	int index_inc;
	size_t bytes;

	/*
	 * Physical allocation size.
	 */
	bytes = (size_t)1 << radix;

	/*
	 * Calculate block selection index 0..7 of current block.  If this
	 * is the first allocation of the block (versus a modification of an
	 * existing block), we use index 0, otherwise we use the next rotating
	 * index.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		index = 0;
	} else {
		/*
		 * Recover the current rotation index from the existing
		 * reserved-area offset of this freemap block.
		 */
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      (((hammer2_off_t)1 <<
			HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
			 off < HAMMER2_ZONE_FREEMAP_END);
		index = (int)(off - HAMMER2_ZONE_FREEMAP_00) /
			HAMMER2_ZONE_FREEMAP_INC;
		KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS);
		if (++index == HAMMER2_NFREEMAPS)
			index = 0;
	}

	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	index_inc = index * HAMMER2_ZONE_FREEMAP_INC;

	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL5_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 2PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 2TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 2GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		off = (hammer2_off_t)-1;
		break;
	}
	bref->data_off = off | radix;
#if FREEMAP_DEBUG
	kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		bref->type, bref->key, bref->keybits, bref->data_off);
#endif
	return (0);
}

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * Locality-of-reference heuristics are tracked per bref type / device
 * block radix in hmp->heur_freemap[] (see hindex calculation below).
 * bref->key may also be used heuristically.
 */
int
hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes)
{
	hammer2_dev_t *hmp = chain->hmp;
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_chain_t *parent;
	hammer2_tid_t mtid;
	int radix;
	int error;
	unsigned int hindex;
	hammer2_fiterate_t iter;

	mtid = hammer2_trans_sub(hmp->spmp);

	/*
	 * Validate the allocation size.  It must be a power of 2.
	 *
	 * For now require that the caller be aware of the minimum
	 * allocation (1K).
	 */
	radix = hammer2_getradix(bytes);
	KKASSERT((size_t)1 << radix == bytes);

	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		/*
		 * Freemap blocks themselves are assigned from the reserve
		 * area, not allocated from the freemap.
		 */
		error = hammer2_freemap_reserve(chain, radix);
		return error;
	}

	KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);

	/*
	 * Calculate the starting point for our allocation search.
	 *
	 * Each freemap leaf is dedicated to a specific freemap_radix.
	 * The freemap_radix can be more fine-grained than the device buffer
	 * radix which results in inodes being grouped together in their
	 * own segment, terminal-data (16K or less) and initial indirect
	 * block being grouped together, and then full-indirect and full-data
	 * blocks (64K) being grouped together.
	 *
	 * The single most important aspect of this is the inode grouping
	 * because that is what allows 'find' and 'ls' and other filesystem
	 * topology operations to run fast.
	 */
#if 0
	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
		bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	else if (trans->tmp_bpref)
		bpref = trans->tmp_bpref;
	else if (trans->tmp_ip)
		bpref = trans->tmp_ip->chain->bref.data_off;
	else
#endif
	/*
	 * Heuristic tracking index.  We would like one for each distinct
	 * bref type if possible.  heur_freemap[] has room for two classes
	 * for each type.  At a minimum we have to break-up our heuristic
	 * by device block sizes.
	 */
	hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);

	iter.bpref = hmp->heur_freemap[hindex];

	/*
	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
	 * reserved area, the try code will iterate past it.
	 */
	if (iter.bpref > hmp->voldata.volu_size)
		iter.bpref = hmp->voldata.volu_size - 1;

	/*
	 * Iterate the freemap looking for free space before and after.
	 *
	 * try_alloc returns EAGAIN when it wants us to retry at the next
	 * iteration point (iter.bnext), terminating with either success (0)
	 * or a hard error (e.g. ENOSPC after two full wraps).
	 */
	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	error = EAGAIN;
	iter.bnext = iter.bpref;
	iter.loops = 0;

	while (error == EAGAIN) {
		error = hammer2_freemap_try_alloc(&parent, bref, radix,
						  &iter, mtid);
	}
	hmp->heur_freemap[hindex] = iter.bnext;
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	return (error);
}

/*
 * Attempt one allocation of (1 << radix) bytes near iter->bnext within the
 * level-1 freemap leaf covering that offset, creating and initializing the
 * leaf if it does not yet exist.  On success bref->data_off is set.  Returns
 * 0, EAGAIN (caller should retry at the updated iter->bnext), or a hard
 * error such as ENOSPC/EIO.
 */
static int
hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			  hammer2_blockref_t *bref, int radix,
			  hammer2_fiterate_t *iter, hammer2_tid_t mtid)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_key_t key_dummy;
	hammer2_chain_t *chain;
	hammer2_off_t key;
	size_t bytes;
	uint16_t class;
	int error = 0;
	int cache_index = -1;

	/*
	 * Calculate the number of bytes being allocated, the number
	 * of contiguous bits of bitmap being allocated, and the bitmap
	 * mask.
	 *
	 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
	 *	    mask calculation.
	 */
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);

	/*
	 * Lookup the level1 freemap chain, creating and initializing one
	 * if necessary.  Intermediate levels will be created automatically
	 * when necessary by hammer2_chain_create().
	 */
	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	if (chain == NULL) {
		/*
		 * Create the missing leaf, be sure to initialize
		 * the auxiliary freemap tracking information in
		 * the bref.check.freemap structure.
		 */
#if 0
		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
			key, iter->bpref);
#endif
		error = hammer2_chain_create(parentp, &chain, hmp->spmp,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0);
		KKASSERT(error == 0);
		if (error == 0) {
			hammer2_chain_modify(chain, mtid, 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(hmp, key, chain);
		}
	} else if (chain->error) {
		/*
		 * Error during lookup.
		 */
		kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		error = EIO;
	} else if ((chain->bref.check.freemap.bigmask &
		   ((size_t)1 << radix)) == 0) {
		/*
		 * Already flagged as not having enough space for an
		 * allocation of this radix.
		 */
		error = ENOSPC;
	} else {
		/*
		 * Modify existing chain to setup for adjustment.
		 */
		hammer2_chain_modify(chain, mtid, 0);
	}

	/*
	 * Scan 2MB entries.  The scan proceeds bidirectionally away from
	 * the starting entry (start + count and start - count each loop).
	 */
	if (error == 0) {
		hammer2_bmap_data_t *bmap;
		hammer2_key_t base_key;
		int count;
		int start;
		int n;

		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		start = (int)((iter->bnext - key) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX);
		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
		hammer2_chain_modify(chain, mtid, 0);

		error = ENOSPC;
		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
			int availchk;

			if (start + count >= HAMMER2_FREEMAP_COUNT &&
			    start - count < 0) {
				break;
			}

			/*
			 * Calculate bmap pointer (forward direction).
			 *
			 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT
			 *	 (it is only dereferenced after the bounds
			 *	 check below).
			 */
			n = start + count;
			bmap = &chain->data->bmdata[n];

			if (n >= HAMMER2_FREEMAP_COUNT) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				/*
				 * Sub-16KB allocation may still fit in the
				 * partially consumed linear chunk.
				 */
				availchk = 1;
			} else {
				availchk = 0;
			}

			if (availchk &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n, radix,
							   &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}

			/*
			 * Must recalculate after potentially having called
			 * hammer2_bmap_alloc() above in case chain was
			 * reallocated.
			 *
			 * NOTE: bmap pointer is invalid if n < 0.
			 */
			n = start - count;
			bmap = &chain->data->bmdata[n];
			if (n < 0) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
				   (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			if (availchk &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n, radix,
							   &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}
		}
		if (error == ENOSPC) {
			/*
			 * Leaf exhausted for this radix, clear the bigmask
			 * hint so future attempts fail fast.
			 */
			chain->bref.check.freemap.bigmask &=
				(uint32_t)~((size_t)1 << radix);
		}
		/* XXX also scan down from original count */
	}

	if (error == 0) {
		/*
		 * Assert validity.  Must be beyond the static allocator used
		 * by newfs_hammer2 (and thus also beyond the aux area),
		 * not go past the volume size, and must not be in the
		 * reserved segment area for a zone.
		 */
		KKASSERT(key >= hmp->voldata.allocator_beg &&
			 key + bytes <= hmp->voldata.volu_size);
		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
		bref->data_off = key | radix;

#if 0
		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
			chain,
			bref->key, bref->data_off, chain->bref.data_off);
#endif
	} else if (error == ENOSPC) {
		/*
		 * Return EAGAIN with next iteration in iter->bnext, or
		 * return ENOSPC if the allocation map has been exhausted.
		 */
		error = hammer2_freemap_iterate(parentp, &chain, iter);
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	return (error);
}

/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the bitmap
 * that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 */
static
int
hammer2_bmap_alloc(hammer2_dev_t *hmp, hammer2_bmap_data_t *bmap,
		   uint16_t class, int n, int radix, hammer2_key_t *basep)
{
	hammer2_io_t *dio;
	size_t size;
	size_t bgsize;
	int bmradix;
	hammer2_bitmap_t bmmask;
	int offset;
	int error;
	int i;
	int j;

	/*
	 * Take into account 2-bits per block when calculating bmradix.
	 */
	size = (size_t)1 << radix;

	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
		bmradix = 2;
		/* (16K) 2 bits per allocation block */
	} else {
		bmradix = (hammer2_bitmap_t)2 <<
			  (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
		/* (32K-256K) 4, 8, 16, 32 bits per allocation block */
	}

	/*
	 * Use the linear iterator to pack small allocations, otherwise
	 * fall-back to finding a free 16KB chunk.  The linear iterator
	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
	 * If it is the bitmap must be scanned.  It can become invalid
	 * once we pack to the boundary.  We adjust it after a bitmap
	 * allocation only for sub-16KB allocations (so the perfectly good
	 * previous value can still be used for fragments when 16KB+
	 * allocations are made).
	 *
	 * Beware of hardware artifacts when bmradix == 64 (intermediate
	 * result can wind up being '1' instead of '0' if hardware masks
	 * bit-count & 31).
	 *
	 * NOTE: j needs to be even in the j= calculation.  As an artifact
	 *	 of the /2 division, our bitmask has to clear bit 0.
	 *
	 * NOTE: TODO this can leave little unallocatable fragments lying
	 *	 around.
	 */
	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
	    HAMMER2_FREEMAP_BLOCK_SIZE &&
	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
	    bmap->linear < HAMMER2_SEGSIZE) {
		/*
		 * Linear iterator path: continue packing into the current
		 * partially-used 16KB chunk.
		 */
		KKASSERT(bmap->linear >= 0 &&
			 bmap->linear + size <= HAMMER2_SEGSIZE &&
			 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
		offset = bmap->linear;
		i = offset / (HAMMER2_SEGSIZE / 8);
		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
		bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			 HAMMER2_BMAP_ALLONES :
			 ((hammer2_bitmap_t)1 << bmradix) - 1;
		bmmask <<= j;
		bmap->linear = offset + size;
	} else {
		/*
		 * Bitmap scan path: find a run of bmradix clear bits.
		 */
		for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			for (j = 0;
			     j < HAMMER2_BMAP_BITS_PER_ELEMENT;
			     j += bmradix) {
				if ((bmap->bitmapq[i] & bmmask) == 0)
					goto success;
				bmmask <<= bmradix;
			}
		}
		/*fragments might remain*/
		/*KKASSERT(bmap->avail == 0);*/
		return (ENOSPC);
success:
		offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) +
			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
			bmap->linear = offset + size;
	}

	/* 8 x (64/2) -> 256 x 16K -> 4MB */
	KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS);

	/*
	 * Optimize the buffer cache to avoid unnecessary read-before-write
	 * operations.
	 *
	 * The device block size could be larger than the allocation size
	 * so the actual bitmap test is somewhat more involved.  We have
	 * to use a compatible buffer size for this operation.
	 */
	if ((bmap->bitmapq[i] & bmmask) == 0 &&
	    hammer2_devblksize(size) != size) {
		size_t psize = hammer2_devblksize(size);
		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
		int pbmradix = (hammer2_bitmap_t)2 <<
			       (hammer2_devblkradix(radix) -
				HAMMER2_FREEMAP_BLOCK_RADIX);
		hammer2_bitmap_t pbmmask;
		int pradix = hammer2_getradix(psize);

		pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			HAMMER2_BMAP_ALLONES :
			((hammer2_bitmap_t)1 << pbmradix) - 1;
		while ((pbmmask & bmmask) == 0)
			pbmmask <<= pbmradix;

#if 0
		kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
			*basep + offset, bmap->bitmapq[i],
			pbmmask, bmmask, size, psize);
#endif

		if ((bmap->bitmapq[i] & pbmmask) == 0) {
			/*
			 * Entire device block is being newly allocated,
			 * instantiate the buffer without reading it.
			 */
			error = hammer2_io_newq(hmp,
						(*basep + (offset & ~pmask)) |
						 pradix,
						psize, &dio);
			hammer2_io_bqrelse(&dio);
		}
	}

#if 0
	/*
	 * When initializing a new inode segment also attempt to initialize
	 * an adjacent segment.  Be careful not to index beyond the array
	 * bounds.
	 *
	 * We do this to try to localize inode accesses to improve
	 * directory scan rates.  XXX doesn't improve scan rates.
	 */
	if (size == HAMMER2_INODE_BYTES) {
		if (n & 1) {
			if (bmap[-1].radix == 0 && bmap[-1].avail)
				bmap[-1].radix = radix;
		} else {
			if (bmap[1].radix == 0 && bmap[1].avail)
				bmap[1].radix = radix;
		}
	}
#endif

	/*
	 * Calculate the bitmap-granular change in bgsize for the volume
	 * header.  We cannot use the fine-grained change here because
	 * the bulkfree code can't undo it.  If the bitmap element is already
	 * marked allocated it has already been accounted for.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		if (bmap->bitmapq[i] & bmmask)
			bgsize = 0;
		else
			bgsize = HAMMER2_FREEMAP_BLOCK_SIZE;
	} else {
		bgsize = size;
	}

	/*
	 * Adjust the bitmap, set the class (it might have been 0),
	 * and available bytes, update the allocation offset (*basep)
	 * from the L0 base to the actual offset.
	 *
	 * avail must reflect the bitmap-granular availability.  The allocator
	 * tests will also check the linear iterator.
	 */
	bmap->bitmapq[i] |= bmmask;
	bmap->class = class;
	bmap->avail -= bgsize;
	*basep += offset;

	/*
	 * Adjust the volume header's allocator_free parameter.  This
	 * parameter has to be fixed up by bulkfree which has no way to
	 * figure out sub-16K chunking, so it must be adjusted by the
	 * bitmap-granular size.
	 */
	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}

	return(0);
}

/*
 * Initialize a freshly created level-1 freemap leaf covering the 2GB
 * segment containing (key): mark in-bounds 2MB entries free and everything
 * else (static allocation, reserved area, trailing space) allocated.
 */
static
void
hammer2_freemap_init(hammer2_dev_t *hmp, hammer2_key_t key,
		     hammer2_chain_t *chain)
{
	hammer2_off_t l1size;
	hammer2_off_t lokey;
	hammer2_off_t hikey;
	hammer2_bmap_data_t *bmap;
	int count;

	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);

	/*
	 * Calculate the portion of the 2GB map that should be initialized
	 * as free.  Portions below or after will be initialized as allocated.
	 * SEGMASK-align the areas so we don't have to worry about sub-scans
	 * or endianness when using memset.
	 *
	 * (1) Ensure that all statically allocated space from newfs_hammer2
	 *     is marked allocated.
	 *
	 * (2) Ensure that the reserved area is marked allocated (typically
	 *     the first 4MB of the 2GB area being represented).
	 *
	 * (3) Ensure that any trailing space at the end-of-volume is marked
	 *     allocated.
	 *
	 * WARNING! It is possible for lokey to be larger than hikey if the
	 *	    entire 2GB segment is within the static allocation.
749 */ 750 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) & 751 ~HAMMER2_SEGMASK64; 752 753 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 754 HAMMER2_ZONE_SEG64) { 755 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 756 HAMMER2_ZONE_SEG64; 757 } 758 759 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 760 if (hikey > hmp->voldata.volu_size) { 761 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64; 762 } 763 764 chain->bref.check.freemap.avail = 765 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 766 bmap = &chain->data->bmdata[0]; 767 768 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 769 if (key < lokey || key >= hikey) { 770 memset(bmap->bitmapq, -1, 771 sizeof(bmap->bitmapq)); 772 bmap->avail = 0; 773 bmap->linear = HAMMER2_SEGSIZE; 774 chain->bref.check.freemap.avail -= 775 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 776 } else { 777 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 778 } 779 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 780 ++bmap; 781 } 782 } 783 784 /* 785 * The current Level 1 freemap has been exhausted, iterate to the next 786 * one, return ENOSPC if no freemaps remain. 787 * 788 * XXX this should rotate back to the beginning to handle freed-up space 789 * XXX or use intermediate entries to locate free space. TODO 790 */ 791 static int 792 hammer2_freemap_iterate(hammer2_chain_t **parentp, hammer2_chain_t **chainp, 793 hammer2_fiterate_t *iter) 794 { 795 hammer2_dev_t *hmp = (*parentp)->hmp; 796 797 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 798 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 799 if (iter->bnext >= hmp->voldata.volu_size) { 800 iter->bnext = 0; 801 if (++iter->loops == 2) 802 return (ENOSPC); 803 } 804 return(EAGAIN); 805 } 806 807 /* 808 * Adjust the bit-pattern for data in the freemap bitmap according to 809 * (how). 
This code is called from on-mount recovery to fixup (mark 810 * as allocated) blocks whos freemap upates might not have been committed 811 * in the last crash and is used by the bulk freemap scan to stage frees. 812 * 813 * XXX currently disabled when how == 0 (the normal real-time case). At 814 * the moment we depend on the bulk freescan to actually free blocks. It 815 * will still call this routine with a non-zero how to stage possible frees 816 * and to do the actual free. 817 */ 818 void 819 hammer2_freemap_adjust(hammer2_dev_t *hmp, hammer2_blockref_t *bref, 820 int how) 821 { 822 hammer2_off_t data_off = bref->data_off; 823 hammer2_chain_t *chain; 824 hammer2_chain_t *parent; 825 hammer2_bmap_data_t *bmap; 826 hammer2_key_t key; 827 hammer2_key_t key_dummy; 828 hammer2_off_t l0size; 829 hammer2_off_t l1size; 830 hammer2_off_t l1mask; 831 hammer2_tid_t mtid; 832 hammer2_bitmap_t *bitmap; 833 const hammer2_bitmap_t bmmask00 = 0; 834 hammer2_bitmap_t bmmask01; 835 hammer2_bitmap_t bmmask10; 836 hammer2_bitmap_t bmmask11; 837 size_t bytes; 838 uint16_t class; 839 int radix; 840 int start; 841 int count; 842 int modified = 0; 843 int cache_index = -1; 844 int error; 845 846 KKASSERT(how == HAMMER2_FREEMAP_DORECOVER); 847 848 mtid = hammer2_trans_sub(hmp->spmp); 849 850 radix = (int)data_off & HAMMER2_OFF_MASK_RADIX; 851 data_off &= ~HAMMER2_OFF_MASK_RADIX; 852 KKASSERT(radix <= HAMMER2_RADIX_MAX); 853 854 bytes = (size_t)1 << radix; 855 class = (bref->type << 8) | hammer2_devblkradix(radix); 856 857 /* 858 * We can't adjust thre freemap for data allocations made by 859 * newfs_hammer2. 860 */ 861 if (data_off < hmp->voldata.allocator_beg) 862 return; 863 864 KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG); 865 866 /* 867 * Lookup the level1 freemap chain. The chain must exist. 
868 */ 869 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX); 870 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 871 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 872 l1mask = l1size - 1; 873 874 parent = &hmp->fchain; 875 hammer2_chain_ref(parent); 876 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 877 878 chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask, 879 &cache_index, 880 HAMMER2_LOOKUP_ALWAYS | 881 HAMMER2_LOOKUP_MATCHIND); 882 883 /* 884 * Stop early if we are trying to free something but no leaf exists. 885 */ 886 if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) { 887 kprintf("hammer2_freemap_adjust: %016jx: no chain\n", 888 (intmax_t)bref->data_off); 889 goto done; 890 } 891 if (chain->error) { 892 kprintf("hammer2_freemap_adjust: %016jx: error %s\n", 893 (intmax_t)bref->data_off, 894 hammer2_error_str(chain->error)); 895 hammer2_chain_unlock(chain); 896 hammer2_chain_drop(chain); 897 chain = NULL; 898 goto done; 899 } 900 901 /* 902 * Create any missing leaf(s) if we are doing a recovery (marking 903 * the block(s) as being allocated instead of being freed). Be sure 904 * to initialize the auxillary freemap tracking info in the 905 * bref.check.freemap structure. 
906 */ 907 if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) { 908 error = hammer2_chain_create(&parent, &chain, hmp->spmp, 909 key, HAMMER2_FREEMAP_LEVEL1_RADIX, 910 HAMMER2_BREF_TYPE_FREEMAP_LEAF, 911 HAMMER2_FREEMAP_LEVELN_PSIZE, 912 mtid, 0); 913 914 if (hammer2_debug & 0x0040) { 915 kprintf("fixup create chain %p %016jx:%d\n", 916 chain, chain->bref.key, chain->bref.keybits); 917 } 918 919 if (error == 0) { 920 hammer2_chain_modify(chain, mtid, 0); 921 bzero(&chain->data->bmdata[0], 922 HAMMER2_FREEMAP_LEVELN_PSIZE); 923 chain->bref.check.freemap.bigmask = (uint32_t)-1; 924 chain->bref.check.freemap.avail = l1size; 925 /* bref.methods should already be inherited */ 926 927 hammer2_freemap_init(hmp, key, chain); 928 } 929 /* XXX handle error */ 930 } 931 932 #if FREEMAP_DEBUG 933 kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n", 934 chain->bref.type, chain->bref.key, 935 chain->bref.keybits, chain->bref.data_off); 936 #endif 937 938 /* 939 * Calculate the bitmask (runs in 2-bit pairs). 940 */ 941 start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2; 942 bmmask01 = (hammer2_bitmap_t)1 << start; 943 bmmask10 = (hammer2_bitmap_t)2 << start; 944 bmmask11 = (hammer2_bitmap_t)3 << start; 945 946 /* 947 * Fixup the bitmap. Partial blocks cannot be fully freed unless 948 * a bulk scan is able to roll them up. 949 */ 950 if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) { 951 count = 1; 952 if (how == HAMMER2_FREEMAP_DOREALFREE) 953 how = HAMMER2_FREEMAP_DOMAYFREE; 954 } else { 955 count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX); 956 } 957 958 /* 959 * [re]load the bmap and bitmap pointers. Each bmap entry covers 960 * a 2MB swath. The bmap itself (LEVEL1) covers 2GB. 961 * 962 * Be sure to reset the linear iterator to ensure that the adjustment 963 * is not ignored. 
964 */ 965 again: 966 bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) & 967 (HAMMER2_FREEMAP_COUNT - 1)]; 968 bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7]; 969 970 if (modified) 971 bmap->linear = 0; 972 973 while (count) { 974 KKASSERT(bmmask11); 975 if (how == HAMMER2_FREEMAP_DORECOVER) { 976 /* 977 * Recovery request, mark as allocated. 978 */ 979 if ((*bitmap & bmmask11) != bmmask11) { 980 if (modified == 0) { 981 hammer2_chain_modify(chain, mtid, 0); 982 modified = 1; 983 goto again; 984 } 985 if ((*bitmap & bmmask11) == bmmask00) { 986 bmap->avail -= 987 HAMMER2_FREEMAP_BLOCK_SIZE; 988 } 989 if (bmap->class == 0) 990 bmap->class = class; 991 *bitmap |= bmmask11; 992 if (hammer2_debug & 0x0040) { 993 kprintf("hammer2_freemap_recover: " 994 "fixup type=%02x " 995 "block=%016jx/%zd\n", 996 bref->type, data_off, bytes); 997 } 998 } else { 999 /* 1000 kprintf("hammer2_freemap_recover: good " 1001 "type=%02x block=%016jx/%zd\n", 1002 bref->type, data_off, bytes); 1003 */ 1004 } 1005 } 1006 #if 0 1007 /* 1008 * XXX this stuff doesn't work, avail is miscalculated and 1009 * code 10 means something else now. 1010 */ 1011 else if ((*bitmap & bmmask11) == bmmask11) { 1012 /* 1013 * Mayfree/Realfree request and bitmap is currently 1014 * marked as being fully allocated. 1015 */ 1016 if (!modified) { 1017 hammer2_chain_modify(chain, 0); 1018 modified = 1; 1019 goto again; 1020 } 1021 if (how == HAMMER2_FREEMAP_DOREALFREE) 1022 *bitmap &= ~bmmask11; 1023 else 1024 *bitmap = (*bitmap & ~bmmask11) | bmmask10; 1025 } else if ((*bitmap & bmmask11) == bmmask10) { 1026 /* 1027 * Mayfree/Realfree request and bitmap is currently 1028 * marked as being possibly freeable. 
1029 */ 1030 if (how == HAMMER2_FREEMAP_DOREALFREE) { 1031 if (!modified) { 1032 hammer2_chain_modify(chain, 0); 1033 modified = 1; 1034 goto again; 1035 } 1036 *bitmap &= ~bmmask11; 1037 } 1038 } else { 1039 /* 1040 * 01 - Not implemented, currently illegal state 1041 * 00 - Not allocated at all, illegal free. 1042 */ 1043 panic("hammer2_freemap_adjust: " 1044 "Illegal state %08x(%08x)", 1045 *bitmap, *bitmap & bmmask11); 1046 } 1047 #endif 1048 --count; 1049 bmmask01 <<= 2; 1050 bmmask10 <<= 2; 1051 bmmask11 <<= 2; 1052 } 1053 #if HAMMER2_BMAP_ELEMENTS != 8 1054 #error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8" 1055 #endif 1056 if (how == HAMMER2_FREEMAP_DOREALFREE && modified) { 1057 bmap->avail += 1 << radix; 1058 KKASSERT(bmap->avail <= HAMMER2_SEGSIZE); 1059 if (bmap->avail == HAMMER2_SEGSIZE && 1060 bmap->bitmapq[0] == 0 && 1061 bmap->bitmapq[1] == 0 && 1062 bmap->bitmapq[2] == 0 && 1063 bmap->bitmapq[3] == 0 && 1064 bmap->bitmapq[4] == 0 && 1065 bmap->bitmapq[5] == 0 && 1066 bmap->bitmapq[6] == 0 && 1067 bmap->bitmapq[7] == 0) { 1068 key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX); 1069 kprintf("Freeseg %016jx\n", (intmax_t)key); 1070 bmap->class = 0; 1071 } 1072 } 1073 1074 /* 1075 * chain->bref.check.freemap.bigmask (XXX) 1076 * 1077 * Setting bigmask is a hint to the allocation code that there might 1078 * be something allocatable. We also set this in recovery... it 1079 * doesn't hurt and we might want to use the hint for other validation 1080 * operations later on. 1081 */ 1082 if (modified) 1083 chain->bref.check.freemap.bigmask |= 1 << radix; 1084 1085 hammer2_chain_unlock(chain); 1086 hammer2_chain_drop(chain); 1087 done: 1088 hammer2_chain_unlock(parent); 1089 hammer2_chain_drop(parent); 1090 } 1091 1092 /* 1093 * Validate the freemap, in three stages. 
1094 * 1095 * stage-1 ALLOCATED -> POSSIBLY FREE 1096 * POSSIBLY FREE -> POSSIBLY FREE (type corrected) 1097 * 1098 * This transitions bitmap entries from ALLOCATED to POSSIBLY FREE. 1099 * The POSSIBLY FREE state does not mean that a block is actually free 1100 * and may be transitioned back to ALLOCATED in stage-2. 1101 * 1102 * This is typically done during normal filesystem operations when 1103 * something is deleted or a block is replaced. 1104 * 1105 * This is done by bulkfree in-bulk after a memory-bounded meta-data 1106 * scan to try to determine what might be freeable. 1107 * 1108 * This can be done unconditionally through a freemap scan when the 1109 * intention is to brute-force recover the proper state of the freemap. 1110 * 1111 * stage-2 POSSIBLY FREE -> ALLOCATED (scan metadata topology) 1112 * 1113 * This is done by bulkfree during a meta-data scan to ensure that 1114 * all blocks still actually allocated by the filesystem are marked 1115 * as such. 1116 * 1117 * NOTE! Live filesystem transitions to POSSIBLY FREE can occur while 1118 * the bulkfree stage-2 and stage-3 is running. The live filesystem 1119 * will use the alternative POSSIBLY FREE type (2) to prevent 1120 * stage-3 from improperly transitioning unvetted possibly-free 1121 * blocks to FREE. 1122 * 1123 * stage-3 POSSIBLY FREE (type 1) -> FREE (scan freemap) 1124 * 1125 * This is done by bulkfree to finalize POSSIBLY FREE states. 1126 * 1127 */ 1128