/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include "hammer2.h"

#define FREEMAP_DEBUG	0

/*
 * Iterator state carried across hammer2_freemap_try_alloc() retries.
 */
struct hammer2_fiterate {
	hammer2_off_t	bpref;	/* preferred starting offset (heuristic) */
	hammer2_off_t	bnext;	/* next media offset to attempt */
	int		loops;	/* wrap count; scan fails after 2nd wrap */
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
			hammer2_chain_t **parentp, hammer2_blockref_t *bref,
			int radix, hammer2_fiterate_t *iter);
static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_trans_t *trans,
			hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);

/*
 * Map an allocation radix to the freemap radix.  Currently an identity
 * mapping; kept as a function so the policy can change in one place.
 */
static __inline
int
hammer2_freemapradix(int radix)
{
	return(radix);
}

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
 */
#define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))

static
int
hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain,
			int radix)
{
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_off_t off;
	int index;
	size_t bytes;

	/*
	 * Physical allocation size -> radix.  Typically either 256 for
	 * a level 0 freemap leaf or 65536 for a level N freemap node.
	 *
	 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
	 *	 Do not use hammer2_allocsize() here as it has a min cap.
	 */
	bytes = 1 << radix;

	/*
	 * Calculate block selection index 0..7 of current block.
	 * A zero data_off means the chain has never been assigned a
	 * reserved block yet, so start at index 0.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		index = 0;
	} else {
		/*
		 * Extract the zone-relative offset in PBUF units and map
		 * it back to the rotation index it was assigned from.
		 */
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
			 off < HAMMER2_ZONE_FREEMAP_END);
		index = (int)(off - HAMMER2_ZONE_FREEMAP_00) / 4;
		KKASSERT(index >= 0 && index < HAMMER2_ZONE_FREEMAP_COPIES);
	}

	/*
	 * Calculate new index (our 'allocation').  We have to be careful
	 * here as there can be two different transaction ids running
	 * concurrently when a flush is in-progress.
	 *
	 * We also want to make sure, for algorithmic repeatability, that
	 * the index sequences are monotonic with transaction ids.  Some
	 * skipping is allowed as long as we ensure that all four volume
	 * header backups have consistent freemaps.
	 *
	 * FLUSH   NORMAL  FLUSH   NORMAL  FLUSH   NORMAL  FLUSH  NORMAL
	 * N+=1    N+=2
	 * (0->1)  (1->3)  (3->4)  (4->6)  (6->7)  (7->9)  (9->10) (10->12)
	 *
	 * [-concurrent-][-concurrent-][-concurrent-][-concurrent-]
	 *
	 * (alternative first NORMAL might be 0->2 if flush had not yet
	 * modified the chain, this is the worst case).
	 */
	if ((trans->flags & HAMMER2_TRANS_ISFLUSH) == 0) {
		/*
		 * Normal transactions always run with the highest TID.
		 * But if a flush is in-progress we want to reserve a slot
		 * for the flush with a lower TID running concurrently to
		 * do a delete-duplicate.
		 */
		index = (index + 2) % HAMMER2_ZONE_FREEMAP_COPIES;
	} else if (trans->flags & HAMMER2_TRANS_ISALLOCATING) {
		/*
		 * Flush transaction, hammer2_freemap.c itself is doing a
		 * delete-duplicate during an allocation within the freemap.
		 */
		index = (index + 1) % HAMMER2_ZONE_FREEMAP_COPIES;
	} else {
		/*
		 * Flush transaction, hammer2_flush.c is doing a
		 * delete-duplicate on the freemap while flushing
		 * hmp->fchain.
		 *
		 * (Same +1 step as the ISALLOCATING case; the branches are
		 * kept separate for documentation purposes.)
		 */
		index = (index + 1) % HAMMER2_ZONE_FREEMAP_COPIES;
	}

	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 2PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 2TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 2GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		      (index * 4 + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		off = (hammer2_off_t)-1;
		break;
	}
	bref->data_off = off | radix;
#if FREEMAP_DEBUG
	kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		bref->type, bref->key, bref->keybits, bref->data_off);
#endif
	return (0);
}

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.
Create missing 219 * freemap infrastructure on-the-fly as needed (including marking initial 220 * allocations using the iterator as allocated, instantiating new 2GB zones, 221 * and dealing with the end-of-media edge case). 222 * 223 * ip and bpref are only used as a heuristic to determine locality of 224 * reference. bref->key may also be used heuristically. 225 * 226 * WARNING! When called from a flush we have to use the 'live' sync_tid 227 * and not the flush sync_tid. The live sync_tid is the flush 228 * sync_tid + 1. That is, freemap allocations which occur during 229 * a flush are not part of the flush. Crash-recovery will restore 230 * any lost allocations. 231 */ 232 int 233 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain, 234 size_t bytes) 235 { 236 hammer2_mount_t *hmp = chain->hmp; 237 hammer2_blockref_t *bref = &chain->bref; 238 hammer2_chain_t *parent; 239 int radix; 240 int error; 241 unsigned int hindex; 242 hammer2_fiterate_t iter; 243 244 /* 245 * Validate the allocation size. It must be a power of 2. 246 * 247 * For now require that the caller be aware of the minimum 248 * allocation (1K). 249 */ 250 radix = hammer2_getradix(bytes); 251 KKASSERT((size_t)1 << radix == bytes); 252 253 /* 254 * Freemap blocks themselves are simply assigned from the reserve 255 * area, not allocated from the freemap. 256 */ 257 if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE || 258 bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) { 259 return (hammer2_freemap_reserve(trans, chain, radix)); 260 } 261 262 #if 0 263 /* 264 * (this mechanic is no longer used, DOMAYFREE is used only by 265 * the bulk freemap scan now). 266 * 267 * Mark previously allocated block as possibly freeable. There might 268 * be snapshots and other races so we can't just mark it fully free. 
269 * (XXX optimize this for the current-transaction create+delete case) 270 */ 271 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) { 272 hammer2_freemap_adjust(trans, hmp, bref, 273 HAMMER2_FREEMAP_DOMAYFREE); 274 } 275 #endif 276 277 /* 278 * Setting ISALLOCATING ensures correct operation even when the 279 * flusher itself is making allocations. 280 */ 281 KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC); 282 KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0); 283 atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING); 284 if (trans->flags & HAMMER2_TRANS_ISFLUSH) 285 ++trans->sync_tid; 286 287 /* 288 * Calculate the starting point for our allocation search. 289 * 290 * Each freemap leaf is dedicated to a specific freemap_radix. 291 * The freemap_radix can be more fine-grained than the device buffer 292 * radix which results in inodes being grouped together in their 293 * own segment, terminal-data (16K or less) and initial indirect 294 * block being grouped together, and then full-indirect and full-data 295 * blocks (64K) being grouped together. 296 * 297 * The single most important aspect of this is the inode grouping 298 * because that is what allows 'find' and 'ls' and other filesystem 299 * topology operations to run fast. 300 */ 301 #if 0 302 if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) 303 bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX; 304 else if (trans->tmp_bpref) 305 bpref = trans->tmp_bpref; 306 else if (trans->tmp_ip) 307 bpref = trans->tmp_ip->chain->bref.data_off; 308 else 309 #endif 310 /* 311 * Heuristic tracking index. We would like one for each distinct 312 * bref type if possible. heur_freemap[] has room for two classes 313 * for each type. At a minimum we have to break-up our heuristic 314 * by device block sizes. 
315 */ 316 hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX; 317 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX); 318 hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX; 319 hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1; 320 KKASSERT(hindex < HAMMER2_FREEMAP_HEUR); 321 322 iter.bpref = hmp->heur_freemap[hindex]; 323 324 /* 325 * Make sure bpref is in-bounds. It's ok if bpref covers a zone's 326 * reserved area, the try code will iterate past it. 327 */ 328 if (iter.bpref > hmp->voldata.volu_size) 329 iter.bpref = hmp->voldata.volu_size - 1; 330 331 /* 332 * Iterate the freemap looking for free space before and after. 333 */ 334 parent = &hmp->fchain; 335 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS); 336 error = EAGAIN; 337 iter.bnext = iter.bpref; 338 iter.loops = 0; 339 340 while (error == EAGAIN) { 341 error = hammer2_freemap_try_alloc(trans, &parent, bref, 342 radix, &iter); 343 } 344 hmp->heur_freemap[hindex] = iter.bnext; 345 hammer2_chain_unlock(parent); 346 347 atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING); 348 if (trans->flags & HAMMER2_TRANS_ISFLUSH) 349 --trans->sync_tid; 350 351 return (error); 352 } 353 354 static int 355 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp, 356 hammer2_blockref_t *bref, int radix, 357 hammer2_fiterate_t *iter) 358 { 359 hammer2_mount_t *hmp = (*parentp)->hmp; 360 hammer2_off_t l0size; 361 hammer2_off_t l1size; 362 hammer2_off_t l1mask; 363 hammer2_key_t key_dummy; 364 hammer2_chain_t *chain; 365 hammer2_off_t key; 366 size_t bytes; 367 uint16_t class; 368 int error = 0; 369 int cache_index = -1; 370 371 372 /* 373 * Calculate the number of bytes being allocated, the number 374 * of contiguous bits of bitmap being allocated, and the bitmap 375 * mask. 376 * 377 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the 378 * mask calculation. 
379 */ 380 bytes = (size_t)1 << radix; 381 class = (bref->type << 8) | hammer2_devblkradix(radix); 382 383 /* 384 * Lookup the level1 freemap chain, creating and initializing one 385 * if necessary. Intermediate levels will be created automatically 386 * when necessary by hammer2_chain_create(). 387 */ 388 key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX); 389 l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 390 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 391 l1mask = l1size - 1; 392 393 chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask, 394 &cache_index, 395 HAMMER2_LOOKUP_ALWAYS | 396 HAMMER2_LOOKUP_MATCHIND); 397 398 if (chain == NULL) { 399 /* 400 * Create the missing leaf, be sure to initialize 401 * the auxillary freemap tracking information in 402 * the bref.check.freemap structure. 403 */ 404 #if 0 405 kprintf("freemap create L1 @ %016jx bpref %016jx\n", 406 key, iter->bpref); 407 #endif 408 error = hammer2_chain_create(trans, parentp, &chain, 409 key, HAMMER2_FREEMAP_LEVEL1_RADIX, 410 HAMMER2_BREF_TYPE_FREEMAP_LEAF, 411 HAMMER2_FREEMAP_LEVELN_PSIZE); 412 KKASSERT(error == 0); 413 if (error == 0) { 414 hammer2_chain_modify(trans, &chain, 0); 415 bzero(&chain->data->bmdata[0], 416 HAMMER2_FREEMAP_LEVELN_PSIZE); 417 chain->bref.check.freemap.bigmask = (uint32_t)-1; 418 chain->bref.check.freemap.avail = l1size; 419 /* bref.methods should already be inherited */ 420 421 hammer2_freemap_init(trans, hmp, key, chain); 422 } 423 } else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) { 424 /* 425 * Already flagged as not having enough space 426 */ 427 error = ENOSPC; 428 } else { 429 /* 430 * Modify existing chain to setup for adjustment. 431 */ 432 hammer2_chain_modify(trans, &chain, 0); 433 } 434 435 /* 436 * Scan 2MB entries. 
437 */ 438 if (error == 0) { 439 hammer2_bmap_data_t *bmap; 440 hammer2_key_t base_key; 441 int count; 442 int start; 443 int n; 444 445 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF); 446 start = (int)((iter->bnext - key) >> 447 HAMMER2_FREEMAP_LEVEL0_RADIX); 448 KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT); 449 hammer2_chain_modify(trans, &chain, 0); 450 451 error = ENOSPC; 452 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 453 if (start + count >= HAMMER2_FREEMAP_COUNT && 454 start - count < 0) { 455 break; 456 } 457 n = start + count; 458 bmap = &chain->data->bmdata[n]; 459 if (n < HAMMER2_FREEMAP_COUNT && bmap->avail && 460 (bmap->class == 0 || bmap->class == class)) { 461 base_key = key + n * l0size; 462 error = hammer2_bmap_alloc(trans, hmp, bmap, 463 class, n, radix, 464 &base_key); 465 if (error != ENOSPC) { 466 key = base_key; 467 break; 468 } 469 } 470 n = start - count; 471 bmap = &chain->data->bmdata[n]; 472 if (n >= 0 && bmap->avail && 473 (bmap->class == 0 || bmap->class == class)) { 474 base_key = key + n * l0size; 475 error = hammer2_bmap_alloc(trans, hmp, bmap, 476 class, n, radix, 477 &base_key); 478 if (error != ENOSPC) { 479 key = base_key; 480 break; 481 } 482 } 483 } 484 if (error == ENOSPC) 485 chain->bref.check.freemap.bigmask &= ~(1 << radix); 486 /* XXX also scan down from original count */ 487 } 488 489 if (error == 0) { 490 /* 491 * Assert validity. Must be beyond the static allocator used 492 * by newfs_hammer2 (and thus also beyond the aux area), 493 * not go past the volume size, and must not be in the 494 * reserved segment area for a zone. 
495 */ 496 KKASSERT(key >= hmp->voldata.allocator_beg && 497 key + bytes <= hmp->voldata.volu_size); 498 KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG); 499 bref->data_off = key | radix; 500 501 #if 0 502 kprintf("alloc cp=%p %016jx %016jx using %016jx\n", 503 chain, 504 bref->key, bref->data_off, chain->bref.data_off); 505 #endif 506 } else if (error == ENOSPC) { 507 /* 508 * Return EAGAIN with next iteration in iter->bnext, or 509 * return ENOSPC if the allocation map has been exhausted. 510 */ 511 error = hammer2_freemap_iterate(trans, parentp, &chain, iter); 512 } 513 514 /* 515 * Cleanup 516 */ 517 if (chain) 518 hammer2_chain_unlock(chain); 519 return (error); 520 } 521 522 /* 523 * Allocate (1<<radix) bytes from the bmap whos base data offset is (*basep). 524 * 525 * If the linear iterator is mid-block we use it directly (the bitmap should 526 * already be marked allocated), otherwise we search for a block in the bitmap 527 * that fits the allocation request. 528 * 529 * A partial bitmap allocation sets the minimum bitmap granularity (16KB) 530 * to fully allocated and adjusts the linear allocator to allow the 531 * remaining space to be allocated. 532 */ 533 static 534 int 535 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp, 536 hammer2_bmap_data_t *bmap, 537 uint16_t class, int n, int radix, hammer2_key_t *basep) 538 { 539 hammer2_io_t *dio; 540 size_t size; 541 size_t bsize; 542 int bmradix; 543 uint32_t bmmask; 544 int offset; 545 int error; 546 int i; 547 int j; 548 549 /* 550 * Take into account 2-bits per block when calculating bmradix. 
551 */ 552 size = (size_t)1 << radix; 553 554 if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) { 555 bmradix = 2; 556 bsize = HAMMER2_FREEMAP_BLOCK_SIZE; 557 /* (16K) 2 bits per allocation block */ 558 } else { 559 bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX); 560 bsize = size; 561 /* (32K-256K) 4, 8, 16, 32 bits per allocation block */ 562 } 563 564 /* 565 * Use the linear iterator to pack small allocations, otherwise 566 * fall-back to finding a free 16KB chunk. The linear iterator 567 * is only valid when *NOT* on a freemap chunking boundary (16KB). 568 * If it is the bitmap must be scanned. It can become invalid 569 * once we pack to the boundary. We adjust it after a bitmap 570 * allocation only for sub-16KB allocations (so the perfectly good 571 * previous value can still be used for fragments when 16KB+ 572 * allocations are made). 573 * 574 * Beware of hardware artifacts when bmradix == 32 (intermediate 575 * result can wind up being '1' instead of '0' if hardware masks 576 * bit-count & 31). 577 * 578 * NOTE: j needs to be even in the j= calculation. As an artifact 579 * of the /2 division, our bitmask has to clear bit 0. 580 * 581 * NOTE: TODO this can leave little unallocatable fragments lying 582 * around. 583 */ 584 if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <= 585 HAMMER2_FREEMAP_BLOCK_SIZE && 586 (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) && 587 bmap->linear < HAMMER2_SEGSIZE) { 588 KKASSERT(bmap->linear >= 0 && 589 bmap->linear + size <= HAMMER2_SEGSIZE && 590 (bmap->linear & (HAMMER2_MIN_ALLOC - 1)) == 0); 591 offset = bmap->linear; 592 i = offset / (HAMMER2_SEGSIZE / 8); 593 j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30; 594 bmmask = (bmradix == 32) ? 595 0xFFFFFFFFU : (1 << bmradix) - 1; 596 bmmask <<= j; 597 bmap->linear = offset + size; 598 } else { 599 for (i = 0; i < 8; ++i) { 600 bmmask = (bmradix == 32) ? 
601 0xFFFFFFFFU : (1 << bmradix) - 1; 602 for (j = 0; j < 32; j += bmradix) { 603 if ((bmap->bitmap[i] & bmmask) == 0) 604 goto success; 605 bmmask <<= bmradix; 606 } 607 } 608 /*fragments might remain*/ 609 /*KKASSERT(bmap->avail == 0);*/ 610 return (ENOSPC); 611 success: 612 offset = i * (HAMMER2_SEGSIZE / 8) + 613 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2)); 614 if (size & HAMMER2_FREEMAP_BLOCK_MASK) 615 bmap->linear = offset + size; 616 } 617 618 KKASSERT(i >= 0 && i < 8); /* 8 x 16 -> 128 x 16K -> 2MB */ 619 620 /* 621 * Optimize the buffer cache to avoid unnecessary read-before-write 622 * operations. 623 * 624 * The device block size could be larger than the allocation size 625 * so the actual bitmap test is somewhat more involved. We have 626 * to use a compatible buffer size for this operation. 627 */ 628 if ((bmap->bitmap[i] & bmmask) == 0 && 629 hammer2_devblksize(size) != size) { 630 size_t psize = hammer2_devblksize(size); 631 hammer2_off_t pmask = (hammer2_off_t)psize - 1; 632 int pbmradix = 2 << (hammer2_devblkradix(radix) - 633 HAMMER2_FREEMAP_BLOCK_RADIX); 634 uint32_t pbmmask; 635 int pradix = hammer2_getradix(psize); 636 637 pbmmask = (pbmradix == 32) ? 0xFFFFFFFFU : (1 << pbmradix) - 1; 638 while ((pbmmask & bmmask) == 0) 639 pbmmask <<= pbmradix; 640 641 #if 0 642 kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n", 643 *basep + offset, bmap->bitmap[i], 644 pbmmask, bmmask, size, psize); 645 #endif 646 647 if ((bmap->bitmap[i] & pbmmask) == 0) { 648 error = hammer2_io_newq(hmp, 649 (*basep + (offset & ~pmask)) | 650 pradix, 651 psize, &dio); 652 hammer2_io_bqrelse(&dio); 653 } 654 } 655 656 #if 0 657 /* 658 * When initializing a new inode segment also attempt to initialize 659 * an adjacent segment. Be careful not to index beyond the array 660 * bounds. 661 * 662 * We do this to try to localize inode accesses to improve 663 * directory scan rates. XXX doesn't improve scan rates. 
664 */ 665 if (size == HAMMER2_INODE_BYTES) { 666 if (n & 1) { 667 if (bmap[-1].radix == 0 && bmap[-1].avail) 668 bmap[-1].radix = radix; 669 } else { 670 if (bmap[1].radix == 0 && bmap[1].avail) 671 bmap[1].radix = radix; 672 } 673 } 674 #endif 675 676 /* 677 * Adjust the linear iterator, set the radix if necessary (might as 678 * well just set it unconditionally), adjust *basep to return the 679 * allocated data offset. 680 */ 681 bmap->bitmap[i] |= bmmask; 682 bmap->class = class; 683 bmap->avail -= size; 684 *basep += offset; 685 686 hammer2_voldata_lock(hmp); 687 hmp->voldata.allocator_free -= size; /* XXX */ 688 hammer2_voldata_unlock(hmp, 1); 689 690 return(0); 691 } 692 693 static 694 void 695 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp, 696 hammer2_key_t key, hammer2_chain_t *chain) 697 { 698 hammer2_off_t l1size; 699 hammer2_off_t lokey; 700 hammer2_off_t hikey; 701 hammer2_bmap_data_t *bmap; 702 int count; 703 704 l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 705 706 /* 707 * Calculate the portion of the 2GB map that should be initialized 708 * as free. Portions below or after will be initialized as allocated. 709 * SEGMASK-align the areas so we don't have to worry about sub-scans 710 * or endianess when using memset. 711 * 712 * (1) Ensure that all statically allocated space from newfs_hammer2 713 * is marked allocated. 714 * 715 * (2) Ensure that the reserved area is marked allocated (typically 716 * the first 4MB of the 2GB area being represented). 717 * 718 * (3) Ensure that any trailing space at the end-of-volume is marked 719 * allocated. 720 * 721 * WARNING! It is possible for lokey to be larger than hikey if the 722 * entire 2GB segment is within the static allocation. 
723 */ 724 lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) & 725 ~HAMMER2_SEGMASK64; 726 727 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 728 HAMMER2_ZONE_SEG64) { 729 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) + 730 HAMMER2_ZONE_SEG64; 731 } 732 733 hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 734 if (hikey > hmp->voldata.volu_size) { 735 hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64; 736 } 737 738 chain->bref.check.freemap.avail = 739 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 740 bmap = &chain->data->bmdata[0]; 741 742 for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) { 743 if (key < lokey || key >= hikey) { 744 memset(bmap->bitmap, -1, 745 sizeof(bmap->bitmap)); 746 bmap->avail = 0; 747 bmap->linear = HAMMER2_SEGSIZE; 748 chain->bref.check.freemap.avail -= 749 H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 750 } else { 751 bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 752 } 753 key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX); 754 ++bmap; 755 } 756 } 757 758 /* 759 * The current Level 1 freemap has been exhausted, iterate to the next 760 * one, return ENOSPC if no freemaps remain. 761 * 762 * XXX this should rotate back to the beginning to handle freed-up space 763 * XXX or use intermediate entries to locate free space. TODO 764 */ 765 static int 766 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp, 767 hammer2_chain_t **chainp, hammer2_fiterate_t *iter) 768 { 769 hammer2_mount_t *hmp = (*parentp)->hmp; 770 771 iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1); 772 iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX); 773 if (iter->bnext >= hmp->voldata.volu_size) { 774 iter->bnext = 0; 775 if (++iter->loops == 2) 776 return (ENOSPC); 777 } 778 return(EAGAIN); 779 } 780 781 /* 782 * Adjust the bit-pattern for data in the freemap bitmap according to 783 * (how). 
This code is called from on-mount recovery to fixup (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 *
 * WARNING! When called from a flush we have to use the 'live' sync_tid
 *	    and not the flush sync_tid.  The live sync_tid is the flush
 *	    sync_tid + 1.  That is, freemap allocations which occur during
 *	    a flush are not part of the flush.  Crash-recovery will restore
 *	    any lost allocations.
 */
void
hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		       hammer2_blockref_t *bref, int how)
{
	hammer2_off_t data_off = bref->data_off;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_bmap_data_t *bmap;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	uint32_t *bitmap;
	const uint32_t bmmask00 = 0;
	uint32_t bmmask01;
	uint32_t bmmask10;
	uint32_t bmmask11;
	size_t bytes;
	uint16_t class;
	int radix;
	int start;
	int count;
	int modified = 0;
	int cache_index = -1;
	int error;

	/* low bits of data_off encode the allocation radix */
	radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
	data_off &= ~HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix <= HAMMER2_MAX_RADIX);

	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);

	/*
	 * We can't adjust the freemap for data allocations made by
	 * newfs_hammer2.
	 */
	if (data_off < hmp->voldata.allocator_beg)
		return;

	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
	KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
	atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
		++trans->sync_tid;

	/*
	 * Lookup the level1 freemap chain.  The chain must exist.
	 */
	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	parent = &hmp->fchain;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	/*
	 * Stop early if we are trying to free something but no leaf exists.
	 */
	if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
		kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
			(intmax_t)bref->data_off);
		goto done;
	}

	/*
	 * Create any missing leaf(s) if we are doing a recovery (marking
	 * the block(s) as being allocated instead of being freed).  Be sure
	 * to initialize the auxiliary freemap tracking info in the
	 * bref.check.freemap structure.
	 */
	if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
		error = hammer2_chain_create(trans, &parent, &chain,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE);

		/*
		 * NOTE(review): this debug kprintf dereferences chain
		 * before the error check below; if the create failed and
		 * left chain NULL it would fault — confirm create cannot
		 * fail that way, or move the print under (error == 0).
		 */
		if (hammer2_debug & 0x0040) {
			kprintf("fixup create chain %p %016jx:%d\n",
				chain, chain->bref.key, chain->bref.keybits);
		}

		if (error == 0) {
			hammer2_chain_modify(trans, &chain, 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(trans, hmp, key, chain);
		}
		/* XXX handle error */
	}

#if FREEMAP_DEBUG
	kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		chain->bref.type, chain->bref.key,
		chain->bref.keybits, chain->bref.data_off);
#endif

	/*
	 * Calculate the bitmask (runs in 2-bit pairs).
	 */
	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
	bmmask01 = 1 << start;
	bmmask10 = 2 << start;
	bmmask11 = 3 << start;

	/*
	 * Fixup the bitmap.  Partial blocks cannot be fully freed unless
	 * a bulk scan is able to roll them up.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		count = 1;
		if (how == HAMMER2_FREEMAP_DOREALFREE)
			how = HAMMER2_FREEMAP_DOMAYFREE;
	} else {
		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
	}

	/*
	 * [re]load the bmap and bitmap pointers.  Each bmap entry covers
	 * a 2MB swath.  The bmap itself (LEVEL1) covers 2GB.
	 *
	 * Be sure to reset the linear iterator to ensure that the adjustment
	 * is not ignored.
	 *
	 * We re-enter here after hammer2_chain_modify() because the modify
	 * may have delete-duplicated the chain, invalidating the pointers.
	 */
again:
	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
				    (HAMMER2_FREEMAP_COUNT - 1)];
	bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
	bmap->linear = 0;

	while (count) {
		KKASSERT(bmmask11);
		if (how == HAMMER2_FREEMAP_DORECOVER) {
			/*
			 * Recovery request, mark as allocated.
			 */
			if ((*bitmap & bmmask11) != bmmask11) {
				if (modified == 0) {
					hammer2_chain_modify(trans, &chain, 0);
					modified = 1;
					goto again;
				}
				if ((*bitmap & bmmask11) == bmmask00)
					bmap->avail -= 1 << radix;
				if (bmap->class == 0)
					bmap->class = class;
				*bitmap |= bmmask11;
				if (hammer2_debug & 0x0040) {
					kprintf("hammer2_freemap_recover: "
						"fixup type=%02x "
						"block=%016jx/%zd\n",
						bref->type, data_off, bytes);
				}
			} else {
				/*
				kprintf("hammer2_freemap_recover:  good "
					"type=%02x block=%016jx/%zd\n",
					bref->type, data_off, bytes);
				*/
			}
		} else if ((*bitmap & bmmask11) == bmmask11) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being fully allocated.
			 */
			if (!modified) {
				hammer2_chain_modify(trans, &chain, 0);
				modified = 1;
				goto again;
			}
			if (how == HAMMER2_FREEMAP_DOREALFREE)
				*bitmap &= ~bmmask11;
			else
				*bitmap = (*bitmap & ~bmmask11) | bmmask10;
		} else if ((*bitmap & bmmask11) == bmmask10) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being possibly freeable.
			 */
			if (how == HAMMER2_FREEMAP_DOREALFREE) {
				if (!modified) {
					hammer2_chain_modify(trans, &chain, 0);
					modified = 1;
					goto again;
				}
				*bitmap &= ~bmmask11;
			}
		} else {
			/*
			 * 01 - Not implemented, currently illegal state
			 * 00 - Not allocated at all, illegal free.
			 */
			panic("hammer2_freemap_adjust: "
			      "Illegal state %08x(%08x)",
			      *bitmap, *bitmap & bmmask11);
		}
		--count;
		bmmask01 <<= 2;
		bmmask10 <<= 2;
		bmmask11 <<= 2;
	}
	if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
		bmap->avail += 1 << radix;
		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
		if (bmap->avail == HAMMER2_SEGSIZE &&
		    bmap->bitmap[0] == 0 &&
		    bmap->bitmap[1] == 0 &&
		    bmap->bitmap[2] == 0 &&
		    bmap->bitmap[3] == 0 &&
		    bmap->bitmap[4] == 0 &&
		    bmap->bitmap[5] == 0 &&
		    bmap->bitmap[6] == 0 &&
		    bmap->bitmap[7] == 0) {
			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
			kprintf("Freeseg %016jx\n", (intmax_t)key);
			bmap->class = 0;
		}
	}

	/*
	 * chain->bref.check.freemap.bigmask (XXX)
	 *
	 * Setting bigmask is a hint to the allocation code that there might
	 * be something allocatable.  We also set this in recovery... it
	 * doesn't hurt and we might want to use the hint for other validation
	 * operations later on.
	 */
	if (modified)
		chain->bref.check.freemap.bigmask |= 1 << radix;

	hammer2_chain_unlock(chain);
done:
	hammer2_chain_unlock(parent);
	atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
		--trans->sync_tid;
}