1 /* 2 * Copyright (c) 2013-2019 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include "hammer2.h"

/*
 * breadth-first search
 *
 * Deferral record for the bulkfree scan.  When the depth-first recursion
 * in hammer2_bulk_scan() exceeds its depth limit, the chain is referenced
 * and queued on one of these instead of being recursed immediately.
 */
typedef struct hammer2_chain_save {
	TAILQ_ENTRY(hammer2_chain_save) entry;
	hammer2_chain_t	*chain;		/* referenced chain, dropped by consumer */
	int		pri;		/* NOTE(review): set nowhere visible here; appears unused */
} hammer2_chain_save_t;

TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;

/*
 * Per-pass state for the bulkfree operation.  One instance lives on the
 * stack of hammer2_bulkfree_pass() and is threaded through the scan
 * callbacks.
 */
typedef struct hammer2_bulkfree_info {
	hammer2_dev_t	*hmp;		/* device being scanned */
	kmem_anon_desc_t kp;		/* swap-backed bitmap buffer descriptor */
	hammer2_off_t	sbase;		/* sub-loop iteration (storage window start) */
	hammer2_off_t	sstop;		/* storage window end (exclusive) */
	hammer2_bmap_data_t *bmap;	/* in-memory freemap being reconstructed */
	int		depth;		/* current depth-first recursion depth */

	/* 2-bit freemap state-transition counters (reported at end of pass) */
	long		count_10_00;	/* staged->free */
	long		count_11_10;	/* allocated->staged */
	long		count_00_11;	/* (should not happen) */
	long		count_01_11;	/* (should not happen) */
	long		count_10_11;	/* staged->allocated */
	long		count_l0cleans;
	long		count_linadjusts;
	long		count_inodes_scanned;
	long		count_dirents_scanned;
	long		count_dedup_factor;
	long		count_bytes_scanned;
	long		count_chains_scanned;
	long		count_chains_reported;
	long		bulkfree_calls;	/* rate-limit counter (see bulkfree_tps) */
	int		bulkfree_ticks;	/* rate-limit reference tick */
	hammer2_off_t	adj_free;	/* accumulated adjustment to allocator_free */
	hammer2_tid_t	mtid;		/* modify tid (0 when no transaction used) */
	hammer2_tid_t	saved_mirror_tid;
	time_t		save_time;	/* for signal-check yields during the scan */
	hammer2_chain_save_list_t list;	/* deferred chains (see hammer2_chain_save) */
	hammer2_dedup_t	*dedup;		/* dedup heuristic array, reset per window */
	int		pri;		/* scan priority heuristic for h2_bulkfree_test */
} hammer2_bulkfree_info_t;

static int h2_bulkfree_test(hammer2_bulkfree_info_t *info,
			hammer2_blockref_t *bref, int pri, int saved_error);

/*
 * General bulk scan function with callback.  Called with a referenced
 * but UNLOCKED parent.  The parent is returned in the same state.
 */
static
int
hammer2_bulk_scan(hammer2_chain_t *parent,
		  int (*func)(hammer2_bulkfree_info_t *info,
			      hammer2_blockref_t *bref),
		  hammer2_bulkfree_info_t *info)
{
	hammer2_blockref_t bref;
	hammer2_chain_t *chain;
	hammer2_chain_save_t *tail;
	hammer2_chain_save_t *save;
	int first = 1;
	int rup_error;
	int error;
	int e2;

	++info->pri;

	chain = NULL;
	rup_error = 0;
	error = 0;

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
				   HAMMER2_RESOLVE_SHARED);

	/*
	 * Remember the current tail of the deferral list so the PFSROOT
	 * re-run below only consumes elements appended during THIS call.
	 */
	tail = TAILQ_LAST(&info->list, hammer2_chain_save_list);

	/*
	 * The parent was previously retrieved NODATA and thus has not
	 * tested the CRC.  Now that we have locked it normally, check
	 * for a CRC problem and skip it if we found one.  The bulk scan
	 * cannot safely traverse invalid block tables (we could end up
	 * in an endless loop or cause a panic).
	 */
	if (parent->error & HAMMER2_ERROR_CHECK) {
		error = parent->error;
		goto done;
	}

	/*
	 * Report which PFS is being scanned
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
		kprintf("hammer2_bulkfree: Scanning %s\n",
			parent->data->ipdata.filename);
	}

	/*
	 * Generally loop on the contents if we have not been flagged
	 * for abort.
	 *
	 * Remember that these chains are completely isolated from
	 * the frontend, so we can release locks temporarily without
	 * imploding.
	 */
	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref, &first,
					    HAMMER2_LOOKUP_NODATA |
					    HAMMER2_LOOKUP_SHARED);

		/*
		 * Handle EOF or other error at current level.  This stops
		 * the bulkfree scan.
		 */
		if (error & ~HAMMER2_ERROR_CHECK)
			break;

		/*
		 * Account for dirents before the data_off test, since most
		 * dirents do not need a data reference.
		 */
		if (bref.type == HAMMER2_BREF_TYPE_DIRENT)
			++info->count_dirents_scanned;

		/*
		 * Ignore brefs without data (typically dirents)
		 */
		if ((bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0)
			continue;

		/*
		 * Process bref, chain is only non-NULL if the bref
		 * might be recursable (it's possible that we sometimes get
		 * a non-NULL chain where the bref cannot be recursed).
		 *
		 * If we already ran down this tree we do not have to do it
		 * again, but we must still recover any cumulative error
		 * recorded from the time we did.
		 */
		++info->pri;
		e2 = h2_bulkfree_test(info, &bref, 1, 0);
		if (e2) {
			error |= e2 & ~HAMMER2_ERROR_EOF;
			continue;
		}

		if (bref.type == HAMMER2_BREF_TYPE_INODE)
			++info->count_inodes_scanned;

		error |= func(info, &bref);
		if (error & ~HAMMER2_ERROR_CHECK)
			break;

		/*
		 * A non-null chain is always returned if it is
		 * recursive, otherwise a non-null chain might be
		 * returned but usually is not when not recursive.
		 */
		if (chain == NULL)
			continue;

		/*
		 * NOTE(review): chain is known non-NULL here (continue
		 * above), so this test is redundant; kept as-is.
		 */
		if (chain) {
			info->count_bytes_scanned += chain->bytes;
			++info->count_chains_scanned;

			/* periodic progress report to the console */
			if (info->count_chains_scanned >=
			    info->count_chains_reported + 1000000 ||
			    (info->count_chains_scanned < 1000000 &&
			     info->count_chains_scanned >=
			     info->count_chains_reported + 100000)) {
				kprintf(" chains %-7ld inodes %-7ld "
					"dirents %-7ld bytes %5ldMB\n",
					info->count_chains_scanned,
					info->count_inodes_scanned,
					info->count_dirents_scanned,
					info->count_bytes_scanned / 1000000);
				info->count_chains_reported =
					info->count_chains_scanned;
			}
		}

		/*
		 * Else check type and setup depth-first scan.
		 *
		 * Account for bytes actually read.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_VOLUME:
		case HAMMER2_BREF_TYPE_FREEMAP:
			++info->depth;
			if (chain->error & HAMMER2_ERROR_CHECK) {
				/*
				 * Cannot safely recurse chains with crc
				 * errors, even in emergency mode.
				 */
				/* NOP */
			} else if (info->depth > 16) {
				/*
				 * Too deep for direct recursion: reference
				 * the chain and defer it on info->list for
				 * a later breadth-first pass.
				 */
				save = kmalloc(sizeof(*save), M_HAMMER2,
					       M_WAITOK | M_ZERO);
				save->chain = chain;
				hammer2_chain_ref(chain);
				TAILQ_INSERT_TAIL(&info->list, save, entry);

				/* guess */
				info->pri += 10;
			} else {
				/*
				 * Recurse directly.  Both locks must be
				 * released around the recursion and then
				 * re-acquired in (parent, chain) order.
				 */
				int savepri = info->pri;

				hammer2_chain_unlock(chain);
				hammer2_chain_unlock(parent);
				info->pri = 0;
				rup_error |= hammer2_bulk_scan(chain,
							       func, info);
				info->pri += savepri;
				hammer2_chain_lock(parent,
						   HAMMER2_RESOLVE_ALWAYS |
						   HAMMER2_RESOLVE_SHARED);
				hammer2_chain_lock(chain,
						   HAMMER2_RESOLVE_ALWAYS |
						   HAMMER2_RESOLVE_SHARED);
			}
			--info->depth;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			break;
		default:
			/* does not recurse */
			break;
		}
		if (rup_error & HAMMER2_ERROR_ABORTED)
			break;
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}

	/*
	 * If this is a PFSROOT, also re-run any deferred elements
	 * added during our scan so we can report any cumulative errors
	 * for the PFS.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
		for (;;) {
			int opri;

			/*
			 * Consume only elements appended after the tail
			 * snapshot taken at entry.
			 */
			save = tail ? TAILQ_NEXT(tail, entry) :
				      TAILQ_FIRST(&info->list);
			if (save == NULL)
				break;

			TAILQ_REMOVE(&info->list, save, entry);
			opri = info->pri;
			info->pri = 0;
			rup_error |= hammer2_bulk_scan(save->chain, func, info);
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
			info->pri = opri;
		}
	}

	error |= rup_error;

	/*
	 * Report which PFS the errors were encountered in.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
	    (error & ~HAMMER2_ERROR_EOF)) {
		kprintf("hammer2_bulkfree: Encountered errors (%08x) "
			"while scanning \"%s\"\n",
			error, parent->data->ipdata.filename);
	}

	/*
	 * Save with higher pri now that we know what it is.
	 */
	h2_bulkfree_test(info, &parent->bref, info->pri + 1,
			 (error & ~HAMMER2_ERROR_EOF));

done:
	hammer2_chain_unlock(parent);

	return (error & ~HAMMER2_ERROR_EOF);
}

/*
 * Bulkfree algorithm
 *
 * Repeat {
 *	Chain flush (partial synchronization) XXX removed
 *	Scan the whole topology - build in-memory freemap (mark 11)
 *	Reconcile the in-memory freemap against the on-disk freemap.
 *	     ondisk xx -> ondisk 11 (if allocated)
 *	     ondisk 11 -> ondisk 10 (if free in-memory)
 *	     ondisk 10 -> ondisk 00 (if free in-memory) - on next pass
 * }
 *
 * The topology scan may have to be performed multiple times to window
 * freemaps which are too large to fit in kernel memory.
 *
 * Races are handled using a double-transition (11->10, 10->00).  The bulkfree
 * scan snapshots the volume root's blockset and thus can run concurrent with
 * normal operations, as long as a full flush is made between each pass to
 * synchronize any modified chains (otherwise their blocks might be improperly
 * freed).
359 * 360 * Temporary memory in multiples of 64KB is required to reconstruct the leaf 361 * hammer2_bmap_data blocks so they can later be compared against the live 362 * freemap. Each 64KB block represents 128 x 16KB x 1024 = ~2 GB of storage. 363 * A 32MB save area thus represents around ~1 TB. The temporary memory 364 * allocated can be specified. If it is not sufficient multiple topology 365 * passes will be made. 366 */ 367 368 /* 369 * Bulkfree callback info 370 */ 371 static void hammer2_bulkfree_thread(void *arg __unused); 372 static void cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size); 373 static int h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, 374 hammer2_blockref_t *bref); 375 static int h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo); 376 static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo, 377 hammer2_off_t data_off, hammer2_bmap_data_t *live, 378 hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base); 379 380 void 381 hammer2_bulkfree_init(hammer2_dev_t *hmp) 382 { 383 hammer2_thr_create(&hmp->bfthr, NULL, hmp, 384 hmp->devrepname, -1, -1, 385 hammer2_bulkfree_thread); 386 } 387 388 void 389 hammer2_bulkfree_uninit(hammer2_dev_t *hmp) 390 { 391 hammer2_thr_delete(&hmp->bfthr); 392 } 393 394 static void 395 hammer2_bulkfree_thread(void *arg) 396 { 397 hammer2_thread_t *thr = arg; 398 hammer2_ioc_bulkfree_t bfi; 399 uint32_t flags; 400 401 for (;;) { 402 hammer2_thr_wait_any(thr, 403 HAMMER2_THREAD_STOP | 404 HAMMER2_THREAD_FREEZE | 405 HAMMER2_THREAD_UNFREEZE | 406 HAMMER2_THREAD_REMASTER, 407 hz * 60); 408 409 flags = thr->flags; 410 cpu_ccfence(); 411 if (flags & HAMMER2_THREAD_STOP) 412 break; 413 if (flags & HAMMER2_THREAD_FREEZE) { 414 hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN, 415 HAMMER2_THREAD_FREEZE); 416 continue; 417 } 418 if (flags & HAMMER2_THREAD_UNFREEZE) { 419 hammer2_thr_signal2(thr, 0, 420 HAMMER2_THREAD_FROZEN | 421 HAMMER2_THREAD_UNFREEZE); 422 continue; 423 } 424 if (flags & 
HAMMER2_THREAD_FROZEN) 425 continue; 426 if (flags & HAMMER2_THREAD_REMASTER) { 427 hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER); 428 bzero(&bfi, sizeof(bfi)); 429 bfi.size = 8192 * 1024; 430 /* hammer2_bulkfree_pass(thr->hmp, &bfi); */ 431 } 432 } 433 thr->td = NULL; 434 hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED); 435 /* structure can go invalid at this point */ 436 } 437 438 int 439 hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain, 440 hammer2_ioc_bulkfree_t *bfi) 441 { 442 hammer2_bulkfree_info_t cbinfo; 443 hammer2_chain_save_t *save; 444 hammer2_off_t incr; 445 size_t size; 446 int error; 447 448 /* 449 * We have to clear the live dedup cache as it might have entries 450 * that are freeable as of now. Any new entries in the dedup cache 451 * made after this point, even if they become freeable, will have 452 * previously been fully allocated and will be protected by the 453 * 2-stage bulkfree. 454 */ 455 hammer2_dedup_clear(hmp); 456 457 /* 458 * Setup for free pass using the buffer size specified by the 459 * hammer2 utility, 32K-aligned. 460 */ 461 bzero(&cbinfo, sizeof(cbinfo)); 462 size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) & 463 ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1); 464 465 /* 466 * Cap at 1/4 physical memory (hammer2 utility will not normally 467 * ever specify a buffer this big, but leave the option available). 468 */ 469 if (size > kmem_lim_size() * 1024 * 1024 / 4) { 470 size = kmem_lim_size() * 1024 * 1024 / 4; 471 kprintf("hammer2: Warning: capping bulkfree buffer at %jdM\n", 472 (intmax_t)size / (1024 * 1024)); 473 } 474 475 #define HAMMER2_FREEMAP_SIZEDIV \ 476 (HAMMER2_FREEMAP_LEVEL1_SIZE / HAMMER2_FREEMAP_LEVELN_PSIZE) 477 #define HAMMER2_FREEMAP_SIZEMASK (HAMMER2_FREEMAP_SIZEDIV - 1) 478 479 /* 480 * Cap at the size needed to cover the whole volume to avoid 481 * making an unnecessarily large allocation. 
482 */ 483 if (size > hmp->voldata.volu_size / HAMMER2_FREEMAP_SIZEDIV) { 484 size = (hmp->voldata.volu_size + HAMMER2_FREEMAP_SIZEMASK) / 485 HAMMER2_FREEMAP_SIZEDIV; 486 } 487 488 /* 489 * Minimum bitmap buffer size, then align to a LEVELN_PSIZE (32K) 490 * boundary. 491 */ 492 if (size < 1024 * 1024) 493 size = 1024 * 1024; 494 size = (size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) & 495 ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1); 496 497 cbinfo.hmp = hmp; 498 cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER); 499 cbinfo.saved_mirror_tid = hmp->voldata.mirror_tid; 500 501 cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE, 502 M_HAMMER2, M_WAITOK | M_ZERO); 503 504 kprintf("hammer2: bulkfree buf=%jdM\n", 505 (intmax_t)size / (1024 * 1024)); 506 507 /* 508 * Normalize start point to a 2GB boundary. We operate on a 509 * 64KB leaf bitmap boundary which represents 2GB of storage. 510 */ 511 cbinfo.sbase = bfi->sbase; 512 if (cbinfo.sbase > hmp->voldata.volu_size) 513 cbinfo.sbase = hmp->voldata.volu_size; 514 cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK; 515 TAILQ_INIT(&cbinfo.list); 516 517 cbinfo.bulkfree_ticks = ticks; 518 519 /* 520 * Loop on a full meta-data scan as many times as required to 521 * get through all available storage. 522 */ 523 error = 0; 524 while (cbinfo.sbase < hmp->voldata.volu_size) { 525 /* 526 * We have enough ram to represent (incr) bytes of storage. 527 * Each 64KB of ram represents 2GB of storage. 528 * 529 * We must also clean out our de-duplication heuristic for 530 * each (incr) bytes of storage, otherwise we wind up not 531 * scanning meta-data for later areas of storage because 532 * they had already been scanned in earlier areas of storage. 533 * Since the ranging is different, we have to restart 534 * the dedup heuristic too. 
535 */ 536 int allmedia; 537 538 cbinfo_bmap_init(&cbinfo, size); 539 bzero(cbinfo.dedup, sizeof(*cbinfo.dedup) * 540 HAMMER2_DEDUP_HEUR_SIZE); 541 cbinfo.count_inodes_scanned = 0; 542 cbinfo.count_dirents_scanned = 0; 543 cbinfo.count_bytes_scanned = 0; 544 cbinfo.count_chains_scanned = 0; 545 cbinfo.count_chains_reported = 0; 546 547 incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE * 548 HAMMER2_FREEMAP_LEVEL1_SIZE; 549 if (hmp->voldata.volu_size - cbinfo.sbase <= incr) { 550 cbinfo.sstop = hmp->voldata.volu_size; 551 allmedia = 1; 552 } else { 553 cbinfo.sstop = cbinfo.sbase + incr; 554 allmedia = 0; 555 } 556 kprintf("hammer2: pass %016jx-%016jx ", 557 (intmax_t)cbinfo.sbase, 558 (intmax_t)cbinfo.sstop); 559 if (allmedia && cbinfo.sbase == 0) 560 kprintf("(all media)\n"); 561 else if (allmedia) 562 kprintf("(remaining media)\n"); 563 else 564 kprintf("(%jdGB of media)\n", 565 (intmax_t)incr / (1024L*1024*1024)); 566 567 /* 568 * Scan topology for stuff inside this range. 569 * 570 * NOTE - By not using a transaction the operation can 571 * run concurrent with the frontend as well as 572 * with flushes. 573 * 574 * We cannot safely set a mtid without a transaction, 575 * and in fact we don't want to set one anyway. We 576 * want the bulkfree to be passive and no interfere 577 * with crash recovery. 
578 */ 579 #undef HAMMER2_BULKFREE_TRANS /* undef - don't use transaction */ 580 #ifdef HAMMER2_BULKFREE_TRANS 581 hammer2_trans_init(hmp->spmp, 0); 582 cbinfo.mtid = hammer2_trans_sub(hmp->spmp); 583 #else 584 cbinfo.mtid = 0; 585 #endif 586 cbinfo.pri = 0; 587 error |= hammer2_bulk_scan(vchain, 588 h2_bulkfree_callback, &cbinfo); 589 590 while ((save = TAILQ_FIRST(&cbinfo.list)) != NULL && 591 (error & ~HAMMER2_ERROR_CHECK) == 0) { 592 TAILQ_REMOVE(&cbinfo.list, save, entry); 593 cbinfo.pri = 0; 594 error |= hammer2_bulk_scan(save->chain, 595 h2_bulkfree_callback, 596 &cbinfo); 597 hammer2_chain_drop(save->chain); 598 kfree(save, M_HAMMER2); 599 } 600 while (save) { 601 TAILQ_REMOVE(&cbinfo.list, save, entry); 602 hammer2_chain_drop(save->chain); 603 kfree(save, M_HAMMER2); 604 save = TAILQ_FIRST(&cbinfo.list); 605 } 606 607 /* 608 * If the complete scan succeeded we can synchronize our 609 * in-memory freemap against live storage. If an abort 610 * occured we cannot safely synchronize our partially 611 * filled-out in-memory freemap. 612 * 613 * We still synchronize on CHECK failures. That is, we still 614 * want bulkfree to operate even if the filesystem has defects. 615 */ 616 if (error & ~HAMMER2_ERROR_CHECK) { 617 kprintf("bulkfree lastdrop %d %d error=0x%04x\n", 618 vchain->refs, vchain->core.chain_count, error); 619 } else { 620 if (error & HAMMER2_ERROR_CHECK) { 621 kprintf("bulkfree lastdrop %d %d " 622 "(with check errors)\n", 623 vchain->refs, vchain->core.chain_count); 624 } else { 625 kprintf("bulkfree lastdrop %d %d\n", 626 vchain->refs, vchain->core.chain_count); 627 } 628 629 error = h2_bulkfree_sync(&cbinfo); 630 631 hammer2_voldata_lock(hmp); 632 hammer2_voldata_modify(hmp); 633 hmp->voldata.allocator_free += cbinfo.adj_free; 634 hammer2_voldata_unlock(hmp); 635 } 636 637 /* 638 * Cleanup for next loop. 
639 */ 640 #ifdef HAMMER2_BULKFREE_TRANS 641 hammer2_trans_done(hmp->spmp, 0); 642 #endif 643 if (error & ~HAMMER2_ERROR_CHECK) 644 break; 645 cbinfo.sbase = cbinfo.sstop; 646 cbinfo.adj_free = 0; 647 } 648 kmem_free_swapbacked(&cbinfo.kp); 649 kfree(cbinfo.dedup, M_HAMMER2); 650 cbinfo.dedup = NULL; 651 652 bfi->sstop = cbinfo.sbase; 653 654 incr = bfi->sstop / (hmp->voldata.volu_size / 10000); 655 if (incr > 10000) 656 incr = 10000; 657 658 kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n", 659 (int)incr / 100, 660 (int)incr % 100); 661 662 if (error & ~HAMMER2_ERROR_CHECK) { 663 kprintf(" bulkfree was aborted\n"); 664 } else { 665 if (error & HAMMER2_ERROR_CHECK) { 666 kprintf(" WARNING: bulkfree " 667 "encountered CRC errors\n"); 668 } 669 kprintf(" transition->free %ld\n", cbinfo.count_10_00); 670 kprintf(" transition->staged %ld\n", cbinfo.count_11_10); 671 kprintf(" ERR(00)->allocated %ld\n", cbinfo.count_00_11); 672 kprintf(" ERR(01)->allocated %ld\n", cbinfo.count_01_11); 673 kprintf(" staged->allocated %ld\n", cbinfo.count_10_11); 674 kprintf(" ~2MB segs cleaned %ld\n", cbinfo.count_l0cleans); 675 kprintf(" linear adjusts %ld\n", 676 cbinfo.count_linadjusts); 677 kprintf(" dedup factor %ld\n", 678 cbinfo.count_dedup_factor); 679 } 680 681 return error; 682 } 683 684 static void 685 cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size) 686 { 687 hammer2_bmap_data_t *bmap = cbinfo->bmap; 688 hammer2_key_t key = cbinfo->sbase; 689 hammer2_key_t lokey; 690 hammer2_key_t hikey; 691 692 lokey = (cbinfo->hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) & 693 ~HAMMER2_SEGMASK64; 694 hikey = cbinfo->hmp->voldata.volu_size & ~HAMMER2_SEGMASK64; 695 696 bzero(bmap, size); 697 while (size) { 698 bzero(bmap, sizeof(*bmap)); 699 if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX)) 700 lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX); 701 if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64) 702 lokey = H2FMZONEBASE(key) + 
				HAMMER2_ZONE_SEG64;
		if (key < lokey || key >= hikey) {
			/* outside usable media: mark fully allocated */
			memset(bmap->bitmapq, -1,
			       sizeof(bmap->bitmapq));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
		} else {
			bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		}
		size -= sizeof(*bmap);
		key += HAMMER2_FREEMAP_LEVEL0_SIZE;
		++bmap;
	}
}

/*
 * Bulkfree scan callback.  Marks the storage backing (bref) as allocated
 * (code 11) in the in-memory bitmap for the current window.  Returns 0 or
 * HAMMER2_ERROR_ABORTED (on signal).
 */
static int
h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref)
{
	hammer2_bmap_data_t *bmap;
	hammer2_off_t data_off;
	uint16_t class;
	size_t bytes;
	int radix;

	/*
	 * Check for signal and allow yield to userland during scan.
	 */
	if (hammer2_signal_check(&cbinfo->save_time))
		return HAMMER2_ERROR_ABORTED;

	/*
	 * Deal with kernel thread cpu or I/O hogging by limiting the
	 * number of chains scanned per second to hammer2_bulkfree_tps.
	 * Ignore leaf records (DIRENT and DATA), no per-record I/O is
	 * involved for those since we don't load their data.
	 */
	if (bref->type != HAMMER2_BREF_TYPE_DATA &&
	    bref->type != HAMMER2_BREF_TYPE_DIRENT) {
		++cbinfo->bulkfree_calls;
		if (cbinfo->bulkfree_calls > hammer2_bulkfree_tps) {
			int dticks = ticks - cbinfo->bulkfree_ticks;
			if (dticks < 0)
				dticks = 0;
			if (dticks < hz) {
				tsleep(&cbinfo->bulkfree_ticks, 0,
				       "h2bw", hz - dticks);
			}
			cbinfo->bulkfree_calls = 0;
			cbinfo->bulkfree_ticks = ticks;
		}
	}

	/*
	 * Calculate the data offset and determine if it is within
	 * the current freemap range being gathered.
	 */
	data_off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	if (data_off < cbinfo->sbase || data_off >= cbinfo->sstop)
		return 0;
	if (data_off < cbinfo->hmp->voldata.allocator_beg)
		return 0;
	if (data_off >= cbinfo->hmp->voldata.volu_size)
		return 0;

	/*
	 * Calculate the information needed to generate the in-memory
	 * freemap record.
	 *
	 * Hammer2 does not allow allocations to cross the L1 (2GB) boundary,
	 * it's a problem if it does.  (Or L0 (2MB) for that matter).
	 */
	radix = (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
	KKASSERT(radix != 0);
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);

	if (data_off + bytes > cbinfo->sstop) {
		kprintf("hammer2_bulkfree_scan: illegal 2GB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = cbinfo->sstop - data_off;	/* XXX */
	}

	/*
	 * Convert to a storage offset relative to the beginning of the
	 * storage range we are collecting.  Then lookup the level0 bmap entry.
	 */
	data_off -= cbinfo->sbase;
	bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);

	/*
	 * Convert data_off to a bmap-relative value (~4MB storage range).
	 * Adjust linear, class, and avail.
	 *
	 * Hammer2 does not allow allocations to cross the L0 (4MB) boundary,
	 */
	data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
	if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
		kprintf("hammer2_bulkfree_scan: illegal 4MB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
	}

	/* first allocation seen in this leaf sets its class */
	if (bmap->class == 0) {
		bmap->class = class;
		bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
	}

	/*
	 * NOTE: bmap->class does not have to match class.  Classification
	 *	 is relaxed when free space is low, so some mixing can occur.
	 */
#if 0
	/*
	 * XXX removed
	 */
	if (bmap->class != class) {
		kprintf("hammer2_bulkfree_scan: illegal mixed class "
			"%016jx %016jx/%d (%04x vs %04x)\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits,
			class, bmap->class);
	}
#endif

	/*
	 * Just record the highest byte-granular offset for now.  Do not
	 * match against allocations which are in multiples of whole blocks.
	 *
	 * Make sure that any in-block linear offset at least covers the
	 * data range.  This can cause bmap->linear to become block-aligned.
	 */
	if (bytes & HAMMER2_FREEMAP_BLOCK_MASK) {
		if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
			bmap->linear = (int32_t)data_off + (int32_t)bytes;
	} else if (bmap->linear >= (int32_t)data_off &&
		   bmap->linear < (int32_t)data_off + (int32_t)bytes) {
		bmap->linear = (int32_t)data_off + (int32_t)bytes;
	}

	/*
	 * Adjust the hammer2_bitmap_t bitmap[HAMMER2_BMAP_ELEMENTS].
	 * 64-bit entries, 2 bits per entry, to code 11.
	 *
	 * NOTE: data_off mask to 524288, shift right by 14 (radix for 16384),
	 *	 and multiply shift amount by 2 for sets of 2 bits.
	 *
	 * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE.
	 *	 also, data_off may not be FREEMAP_BLOCK_SIZE aligned.
	 */
	while (bytes > 0) {
		hammer2_bitmap_t bmask;
		int bindex;

		bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
					   HAMMER2_BMAP_INDEX_RADIX);
		bmask = (hammer2_bitmap_t)3 <<
			((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
			 HAMMER2_FREEMAP_BLOCK_RADIX) << 1);

		/*
		 * NOTE! The (avail) calculation is bitmap-granular.  Multiple
		 *	 sub-granular records can wind up at the same bitmap
		 *	 position.
872 */ 873 if ((bmap->bitmapq[bindex] & bmask) == 0) { 874 if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) { 875 bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE; 876 } else { 877 bmap->avail -= bytes; 878 } 879 bmap->bitmapq[bindex] |= bmask; 880 } 881 data_off += HAMMER2_FREEMAP_BLOCK_SIZE; 882 if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) 883 bytes = 0; 884 else 885 bytes -= HAMMER2_FREEMAP_BLOCK_SIZE; 886 } 887 return 0; 888 } 889 890 /* 891 * Synchronize the in-memory bitmap with the live freemap. This is not a 892 * direct copy. Instead the bitmaps must be compared: 893 * 894 * In-memory Live-freemap 895 * 00 11 -> 10 (do nothing if live modified) 896 * 10 -> 00 (do nothing if live modified) 897 * 11 10 -> 11 handles race against live 898 * ** -> 11 nominally warn of corruption 899 * 900 * We must also fixup the hints in HAMMER2_BREF_TYPE_FREEMAP_LEAF. 901 */ 902 static int 903 h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo) 904 { 905 hammer2_off_t data_off; 906 hammer2_key_t key; 907 hammer2_key_t key_dummy; 908 hammer2_bmap_data_t *bmap; 909 hammer2_bmap_data_t *live; 910 hammer2_chain_t *live_parent; 911 hammer2_chain_t *live_chain; 912 int bmapindex; 913 int error; 914 915 kprintf("hammer2_bulkfree - range "); 916 917 if (cbinfo->sbase < cbinfo->hmp->voldata.allocator_beg) 918 kprintf("%016jx-", 919 (intmax_t)cbinfo->hmp->voldata.allocator_beg); 920 else 921 kprintf("%016jx-", 922 (intmax_t)cbinfo->sbase); 923 924 if (cbinfo->sstop > cbinfo->hmp->voldata.volu_size) 925 kprintf("%016jx\n", 926 (intmax_t)cbinfo->hmp->voldata.volu_size); 927 else 928 kprintf("%016jx\n", 929 (intmax_t)cbinfo->sstop); 930 931 data_off = cbinfo->sbase; 932 bmap = cbinfo->bmap; 933 934 live_parent = &cbinfo->hmp->fchain; 935 hammer2_chain_ref(live_parent); 936 hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS); 937 live_chain = NULL; 938 error = 0; 939 940 /* 941 * Iterate each hammer2_bmap_data_t line (128 bytes) managing 942 * 4MB of storage. 
943 */ 944 while (data_off < cbinfo->sstop) { 945 /* 946 * The freemap is not used below allocator_beg or beyond 947 * volu_size. 948 */ 949 950 if (data_off < cbinfo->hmp->voldata.allocator_beg) 951 goto next; 952 if (data_off >= cbinfo->hmp->voldata.volu_size) 953 goto next; 954 955 /* 956 * Locate the freemap leaf on the live filesystem 957 */ 958 key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK); 959 960 if (live_chain == NULL || live_chain->bref.key != key) { 961 if (live_chain) { 962 hammer2_chain_unlock(live_chain); 963 hammer2_chain_drop(live_chain); 964 } 965 live_chain = hammer2_chain_lookup( 966 &live_parent, 967 &key_dummy, 968 key, 969 key + HAMMER2_FREEMAP_LEVEL1_MASK, 970 &error, 971 HAMMER2_LOOKUP_ALWAYS); 972 if (error) { 973 kprintf("hammer2_bulkfree: freemap lookup " 974 "error near %016jx, error %s\n", 975 (intmax_t)data_off, 976 hammer2_error_str(live_chain->error)); 977 break; 978 } 979 } 980 if (live_chain == NULL) { 981 /* 982 * XXX if we implement a full recovery mode we need 983 * to create/recreate missing freemap chains if our 984 * bmap has any allocated blocks. 985 */ 986 if (bmap->class && 987 bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) { 988 kprintf("hammer2_bulkfree: cannot locate " 989 "live leaf for allocated data " 990 "near %016jx\n", 991 (intmax_t)data_off); 992 } 993 goto next; 994 } 995 if (live_chain->error) { 996 kprintf("hammer2_bulkfree: unable to access freemap " 997 "near %016jx, error %s\n", 998 (intmax_t)data_off, 999 hammer2_error_str(live_chain->error)); 1000 hammer2_chain_unlock(live_chain); 1001 hammer2_chain_drop(live_chain); 1002 live_chain = NULL; 1003 goto next; 1004 } 1005 1006 bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >> 1007 HAMMER2_FREEMAP_LEVEL0_RADIX; 1008 live = &live_chain->data->bmdata[bmapindex]; 1009 1010 /* 1011 * Shortcut if the bitmaps match and the live linear 1012 * indicator is sane. 
We can't do a perfect check of 1013 * live->linear because the only real requirement is that 1014 * if it is not block-aligned, that it not cover the space 1015 * within its current block which overlaps one of the data 1016 * ranges we scan. We don't retain enough fine-grained 1017 * data in our scan to be able to set it exactly. 1018 * 1019 * TODO - we could shortcut this by testing that both 1020 * live->class and bmap->class are 0, and both avails are 1021 * set to HAMMER2_FREEMAP_LEVEL0_SIZE (4MB). 1022 */ 1023 if (bcmp(live->bitmapq, bmap->bitmapq, 1024 sizeof(bmap->bitmapq)) == 0 && 1025 live->linear >= bmap->linear) { 1026 goto next; 1027 } 1028 if (hammer2_debug & 1) { 1029 kprintf("live %016jx %04d.%04x (avail=%d)\n", 1030 data_off, bmapindex, live->class, live->avail); 1031 } 1032 1033 hammer2_chain_modify(live_chain, cbinfo->mtid, 0, 0); 1034 live_chain->bref.check.freemap.bigmask = -1; 1035 cbinfo->hmp->freemap_relaxed = 0; /* reset heuristic */ 1036 live = &live_chain->data->bmdata[bmapindex]; 1037 1038 h2_bulkfree_sync_adjust(cbinfo, data_off, live, bmap, 1039 live_chain->bref.key + 1040 bmapindex * 1041 HAMMER2_FREEMAP_LEVEL0_SIZE); 1042 next: 1043 data_off += HAMMER2_FREEMAP_LEVEL0_SIZE; 1044 ++bmap; 1045 } 1046 if (live_chain) { 1047 hammer2_chain_unlock(live_chain); 1048 hammer2_chain_drop(live_chain); 1049 } 1050 if (live_parent) { 1051 hammer2_chain_unlock(live_parent); 1052 hammer2_chain_drop(live_parent); 1053 } 1054 return error; 1055 } 1056 1057 /* 1058 * Merge the bulkfree bitmap against the existing bitmap. 
 *
 * (live) is the bmap element from the live (on-media) freemap leaf, already
 * under hammer2_chain_modify() by the caller; (bmap) is the in-memory bitmap
 * accumulated by the bulkfree scan for the same LEVEL0 segment.
 *
 * Each storage block is tracked by a 2-bit state in bitmapq[]:
 * 00=free, 10=staged, 11=allocated (01 should not occur -- see the
 * count_* transition counters in hammer2_bulkfree_info).  When the scan
 * says a block is free (00), the live state is only demoted one stage per
 * pass (11->10, then 10->00 on a later pass); when the scan says allocated
 * (11), the live state is promoted to 11 immediately.
 */
static
void
h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
			hammer2_off_t data_off, hammer2_bmap_data_t *live,
			hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base)
{
	int bindex;
	int scount;
	hammer2_off_t tmp_off;		/* media offset of the 2-bit entry */
	hammer2_bitmap_t lmask;
	hammer2_bitmap_t mmask;

	tmp_off = data_off;

	for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
		lmask = live->bitmapq[bindex];	/* live */
		mmask = bmap->bitmapq[bindex];	/* snapshotted bulkfree */
		if (lmask == mmask) {
			/* element identical, skip all 2-bit entries in it */
			tmp_off += HAMMER2_BMAP_INDEX_SIZE;
			continue;
		}

		/*
		 * Walk the element two bits (one block) at a time,
		 * consuming the low bits of lmask/mmask each iteration.
		 */
		for (scount = 0;
		     scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
		     scount += 2) {
			if ((mmask & 3) == 0) {
				/*
				 * in-memory 00		live 11 -> 10
				 *			live 10 -> 00
				 *
				 * Storage might be marked allocated or
				 * staged and must be remarked staged or
				 * free.
				 */
				switch (lmask & 3) {
				case 0:	/* 00 - already free, no change */
					break;
				case 1:	/* 01 - illegal state, report only */
					kprintf("hammer2_bulkfree: cannot "
						"transition m=00/l=01\n");
					break;
				case 2:	/* 10 -> 00 (staged -> free) */
					live->bitmapq[bindex] &=
					    ~((hammer2_bitmap_t)2 << scount);
					live->avail +=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					/* clamp avail to the segment size */
					if (live->avail >
					    HAMMER2_FREEMAP_LEVEL0_SIZE) {
						live->avail =
						    HAMMER2_FREEMAP_LEVEL0_SIZE;
					}
					cbinfo->adj_free +=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					++cbinfo->count_10_00;
					/*
					 * NOTE(review): the OR of the block
					 * radix into the offset presumably
					 * matches hammer2's data_off encoding
					 * (radix in the low bits) -- confirm.
					 */
					hammer2_io_dedup_assert(
						cbinfo->hmp,
						tmp_off |
						HAMMER2_FREEMAP_BLOCK_RADIX,
						HAMMER2_FREEMAP_BLOCK_SIZE);
					break;
				case 3:	/* 11 -> 10 (allocated -> staged) */
					live->bitmapq[bindex] &=
					    ~((hammer2_bitmap_t)1 << scount);
					++cbinfo->count_11_10;
					hammer2_io_dedup_delete(
						cbinfo->hmp,
						HAMMER2_BREF_TYPE_DATA,
						tmp_off |
						HAMMER2_FREEMAP_BLOCK_RADIX,
						HAMMER2_FREEMAP_BLOCK_SIZE);
					break;
				}
			} else if ((mmask & 3) == 3) {
				/*
				 * in-memory 11		live 10 -> 11
				 *			live ** -> 11
				 *
				 * Storage might be incorrectly marked free
				 * or staged and must be remarked fully
				 * allocated.
				 */
				switch (lmask & 3) {
				case 0:	/* 00 -> 11, adjust accounting */
					++cbinfo->count_00_11;
					cbinfo->adj_free -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					live->avail -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					/* clamp against unsigned underflow */
					if ((int32_t)live->avail < 0)
						live->avail = 0;
					break;
				case 1:	/* 01 (should not happen) */
					++cbinfo->count_01_11;
					break;
				case 2:	/* 10 -> 11 */
					++cbinfo->count_10_11;
					break;
				case 3:	/* 11 - already allocated */
					break;
				}
				live->bitmapq[bindex] |=
					((hammer2_bitmap_t)3 << scount);
			}
			mmask >>= 2;
			lmask >>= 2;
			tmp_off += HAMMER2_FREEMAP_BLOCK_SIZE;
		}
	}

	/*
	 * Determine if the live bitmap is completely free and reset its
	 * fields if so.  Otherwise check to see if we can reduce the linear
	 * offset.
	 */
	for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
		if (live->bitmapq[bindex] != 0)
			break;
	}
	if (bindex < 0) {
		/*
		 * Completely empty, reset entire segment
		 */
#if 0
		kprintf("hammer2: cleanseg %016jx.%04x (%d)\n",
			alloc_base, live->class, live->avail);
#endif
		live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		live->class = 0;
		live->linear = 0;
		++cbinfo->count_l0cleans;
	} else if (bindex < 7) {
		/*
		 * Partially full, bitmapq[bindex] != 0.  Our bulkfree pass
		 * does not record enough information to set live->linear
		 * exactly.
		 *
		 * NOTE: Setting live->linear to a sub-block (16K) boundary
		 *	 forces the live code to iterate to the next fully
		 *	 free block.  It does NOT mean that all blocks above
		 *	 live->linear are available.
		 *
		 *	 Setting live->linear to a fragmentary (less than
		 *	 16K) boundary allows allocations to iterate within
		 *	 that sub-block.
		 */
		if (live->linear < bmap->linear &&
		    ((live->linear ^ bmap->linear) &
		     ~HAMMER2_FREEMAP_BLOCK_MASK) == 0) {
			/*
			 * If greater than but still within the same
			 * sub-block as live we can adjust linear upward.
			 */
			live->linear = bmap->linear;
			++cbinfo->count_linadjusts;
		} else {
			/*
			 * Otherwise adjust to the nearest higher or same
			 * sub-block boundary.  The live system may have
			 * bounced live->linear around so we cannot make any
			 * assumptions with regards to available fragmentary
			 * allocations.
			 */
			live->linear =
				(bmap->linear + HAMMER2_FREEMAP_BLOCK_MASK) &
				~HAMMER2_FREEMAP_BLOCK_MASK;
			++cbinfo->count_linadjusts;
		}
	} else {
		/*
		 * Completely full, effectively disable the linear iterator
		 */
		live->linear = HAMMER2_SEGSIZE;
	}

#if 0
	if (bmap->class) {
		kprintf("%016jx %04d.%04x (avail=%7d) "
			"%08x %08x %08x %08x %08x %08x %08x %08x\n",
			(intmax_t)data_off,
			(int)((data_off &
			       HAMMER2_FREEMAP_LEVEL1_MASK) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX),
			bmap->class,
			bmap->avail,
			bmap->bitmap[0], bmap->bitmap[1],
			bmap->bitmap[2], bmap->bitmap[3],
			bmap->bitmap[4], bmap->bitmap[5],
			bmap->bitmap[6], bmap->bitmap[7]);
	}
#endif
}

/*
 * BULKFREE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
1257 */ 1258 static 1259 int 1260 h2_bulkfree_test(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref, 1261 int pri, int saved_error) 1262 { 1263 hammer2_dedup_t *dedup; 1264 int best; 1265 int n; 1266 int i; 1267 1268 n = hammer2_icrc32(&bref->data_off, sizeof(bref->data_off)); 1269 dedup = cbinfo->dedup + (n & (HAMMER2_DEDUP_HEUR_MASK & ~7)); 1270 1271 for (i = best = 0; i < 8; ++i) { 1272 if (dedup[i].data_off == bref->data_off) { 1273 if (dedup[i].ticks < pri) 1274 dedup[i].ticks = pri; 1275 if (pri == 1) 1276 cbinfo->count_dedup_factor += dedup[i].ticks; 1277 return (dedup[i].saved_error | HAMMER2_ERROR_EOF); 1278 } 1279 if (dedup[i].ticks < dedup[best].ticks) 1280 best = i; 1281 } 1282 dedup[best].data_off = bref->data_off; 1283 dedup[best].ticks = pri; 1284 dedup[best].saved_error = saved_error; 1285 1286 return 0; 1287 } 1288