1 /*
2  * Copyright (c) 2013-2019 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/proc.h>
38 #include <sys/mount.h>
39 #include <vm/vm_kern.h>
40 #include <vm/vm_extern.h>
41 
42 #include "hammer2.h"
43 
44 /*
45  * breadth-first search
46  */
47 typedef struct hammer2_chain_save {
48 	TAILQ_ENTRY(hammer2_chain_save)	entry;
49 	hammer2_chain_t	*chain;
50 	int pri;
51 } hammer2_chain_save_t;
52 
53 TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
54 typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;
55 
/*
 * Per-pass bulkfree state, shared between the topology scan callback
 * and the freemap synchronization stage.
 */
typedef struct hammer2_bulkfree_info {
	hammer2_dev_t		*hmp;		/* device being processed */
	kmem_anon_desc_t	kp;		/* swap-backed desc for bmap */
	hammer2_off_t		sbase;		/* sub-loop iteration */
	hammer2_off_t		sstop;
	hammer2_bmap_data_t	*bmap;		/* in-memory freemap build */
	int			depth;		/* bulk_scan recursion depth */
	long			count_10_00;	/* staged->free	     */
	long			count_11_10;	/* allocated->staged */
	long			count_00_11;	/* (should not happen) */
	long			count_01_11;	/* (should not happen) */
	long			count_10_11;	/* staged->allocated */
	long			count_l0cleans;
	long			count_linadjusts;
	long			count_inodes_scanned;
	long			count_dirents_scanned;
	long			count_dedup_factor;
	long			count_bytes_scanned;
	long			count_chains_scanned;
	long			count_chains_reported;
	long			bulkfree_calls;	/* tps throttle counter */
	int			bulkfree_ticks;	/* tps throttle timebase */
	hammer2_off_t		adj_free;	/* accum allocator_free adj */
	hammer2_tid_t		mtid;		/* 0 unless transactional */
	hammer2_tid_t		saved_mirror_tid; /* voldata snapshot */
	time_t			save_time;	/* signal-check pacing */
	hammer2_chain_save_list_t list;		/* deferred chains */
	hammer2_dedup_t		*dedup;		/* dedup heuristic array */
	int			pri;		/* scan priority accumulator */
} hammer2_bulkfree_info_t;
86 
/*
 * Returns non-zero (including any saved error) when this bref appears
 * to have been visited already — presumably via the dedup heuristic;
 * defined later in this file (TODO confirm against definition).
 */
static int h2_bulkfree_test(hammer2_bulkfree_info_t *info,
			hammer2_blockref_t *bref, int pri, int saved_error);
89 
90 /*
91  * General bulk scan function with callback.  Called with a referenced
92  * but UNLOCKED parent.  The parent is returned in the same state.
93  */
94 static
95 int
96 hammer2_bulk_scan(hammer2_chain_t *parent,
97 		  int (*func)(hammer2_bulkfree_info_t *info,
98 			      hammer2_blockref_t *bref),
99 		  hammer2_bulkfree_info_t *info)
100 {
101 	hammer2_blockref_t bref;
102 	hammer2_chain_t *chain;
103 	hammer2_chain_save_t *tail;
104 	hammer2_chain_save_t *save;
105 	int first = 1;
106 	int rup_error;
107 	int error;
108 	int e2;
109 
110 	++info->pri;
111 
112 	chain = NULL;
113 	rup_error = 0;
114 	error = 0;
115 
116 	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
117 				   HAMMER2_RESOLVE_SHARED);
118 	tail = TAILQ_LAST(&info->list, hammer2_chain_save_list);
119 
120 	/*
121 	 * The parent was previously retrieved NODATA and thus has not
122 	 * tested the CRC.  Now that we have locked it normally, check
123 	 * for a CRC problem and skip it if we found one.  The bulk scan
124 	 * cannot safely traverse invalid block tables (we could end up
125 	 * in an endless loop or cause a panic).
126 	 */
127 	if (parent->error & HAMMER2_ERROR_CHECK) {
128 		error = parent->error;
129 		goto done;
130 	}
131 
132 	/*
133 	 * Report which PFS is being scanned
134 	 */
135 	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
136 	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
137 		kprintf("hammer2_bulkfree: Scanning %s\n",
138 			parent->data->ipdata.filename);
139 	}
140 
141 	/*
142 	 * Generally loop on the contents if we have not been flagged
143 	 * for abort.
144 	 *
145 	 * Remember that these chains are completely isolated from
146 	 * the frontend, so we can release locks temporarily without
147 	 * imploding.
148 	 */
149 	for (;;) {
150 		error |= hammer2_chain_scan(parent, &chain, &bref, &first,
151 					    HAMMER2_LOOKUP_NODATA |
152 					    HAMMER2_LOOKUP_SHARED);
153 
154 		/*
155 		 * Handle EOF or other error at current level.  This stops
156 		 * the bulkfree scan.
157 		 */
158 		if (error & ~HAMMER2_ERROR_CHECK)
159 			break;
160 
161 		/*
162 		 * Account for dirents before thre data_off test, since most
163 		 * dirents do not need a data reference.
164 		 */
165 		if (bref.type == HAMMER2_BREF_TYPE_DIRENT)
166 			++info->count_dirents_scanned;
167 
168 		/*
169 		 * Ignore brefs without data (typically dirents)
170 		 */
171 		if ((bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0)
172 			continue;
173 
174 		/*
175 		 * Process bref, chain is only non-NULL if the bref
176 		 * might be recursable (its possible that we sometimes get
177 		 * a non-NULL chain where the bref cannot be recursed).
178 		 *
179 		 * If we already ran down this tree we do not have to do it
180 		 * again, but we must still recover any cumulative error
181 		 * recorded from the time we did.
182 		 */
183 		++info->pri;
184 		e2 = h2_bulkfree_test(info, &bref, 1, 0);
185 		if (e2) {
186 			error |= e2 & ~HAMMER2_ERROR_EOF;
187 			continue;
188 		}
189 
190 		if (bref.type == HAMMER2_BREF_TYPE_INODE)
191 			++info->count_inodes_scanned;
192 
193 		error |= func(info, &bref);
194 		if (error & ~HAMMER2_ERROR_CHECK)
195 			break;
196 
197 		/*
198 		 * A non-null chain is always returned if it is
199 		 * recursive, otherwise a non-null chain might be
200 		 * returned but usually is not when not recursive.
201 		 */
202 		if (chain == NULL)
203 			continue;
204 
205 		if (chain) {
206 			info->count_bytes_scanned += chain->bytes;
207 			++info->count_chains_scanned;
208 
209 			if (info->count_chains_scanned >=
210 			    info->count_chains_reported + 1000000 ||
211 			    (info->count_chains_scanned < 1000000 &&
212 			     info->count_chains_scanned >=
213 			     info->count_chains_reported + 100000)) {
214 				kprintf(" chains %-7ld inodes %-7ld "
215 					"dirents %-7ld bytes %5ldMB\n",
216 					info->count_chains_scanned,
217 					info->count_inodes_scanned,
218 					info->count_dirents_scanned,
219 					info->count_bytes_scanned / 1000000);
220 				info->count_chains_reported =
221 					info->count_chains_scanned;
222 			}
223 		}
224 
225 		/*
226 		 * Else check type and setup depth-first scan.
227 		 *
228 		 * Account for bytes actually read.
229 		 */
230 		switch(chain->bref.type) {
231 		case HAMMER2_BREF_TYPE_INODE:
232 		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
233 		case HAMMER2_BREF_TYPE_INDIRECT:
234 		case HAMMER2_BREF_TYPE_VOLUME:
235 		case HAMMER2_BREF_TYPE_FREEMAP:
236 			++info->depth;
237 			if (chain->error & HAMMER2_ERROR_CHECK) {
238 				/*
239 				 * Cannot safely recurse chains with crc
240 				 * errors, even in emergency mode.
241 				 */
242 				/* NOP */
243 			} else if (info->depth > 16) {
244 				save = kmalloc(sizeof(*save), M_HAMMER2,
245 					       M_WAITOK | M_ZERO);
246 				save->chain = chain;
247 				hammer2_chain_ref(chain);
248 				TAILQ_INSERT_TAIL(&info->list, save, entry);
249 
250 				/* guess */
251 				info->pri += 10;
252 			} else {
253 				int savepri = info->pri;
254 
255 				hammer2_chain_unlock(chain);
256 				hammer2_chain_unlock(parent);
257 				info->pri = 0;
258 				rup_error |= hammer2_bulk_scan(chain,
259 							       func, info);
260 				info->pri += savepri;
261 				hammer2_chain_lock(parent,
262 						   HAMMER2_RESOLVE_ALWAYS |
263 						   HAMMER2_RESOLVE_SHARED);
264 				hammer2_chain_lock(chain,
265 						   HAMMER2_RESOLVE_ALWAYS |
266 						   HAMMER2_RESOLVE_SHARED);
267 			}
268 			--info->depth;
269 			break;
270 		case HAMMER2_BREF_TYPE_DATA:
271 			break;
272 		default:
273 			/* does not recurse */
274 			break;
275 		}
276 		if (rup_error & HAMMER2_ERROR_ABORTED)
277 			break;
278 	}
279 	if (chain) {
280 		hammer2_chain_unlock(chain);
281 		hammer2_chain_drop(chain);
282 	}
283 
284 	/*
285 	 * If this is a PFSROOT, also re-run any defered elements
286 	 * added during our scan so we can report any cumulative errors
287 	 * for the PFS.
288 	 */
289 	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
290 	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
291 		for (;;) {
292 			int opri;
293 
294 			save = tail ? TAILQ_NEXT(tail, entry) :
295 				      TAILQ_FIRST(&info->list);
296 			if (save == NULL)
297 				break;
298 
299 			TAILQ_REMOVE(&info->list, save, entry);
300 			opri = info->pri;
301 			info->pri = 0;
302 			rup_error |= hammer2_bulk_scan(save->chain, func, info);
303 			hammer2_chain_drop(save->chain);
304 			kfree(save, M_HAMMER2);
305 			info->pri = opri;
306 		}
307 	}
308 
309 	error |= rup_error;
310 
311 	/*
312 	 * Report which PFS the errors were encountered in.
313 	 */
314 	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
315 	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
316 	    (error & ~HAMMER2_ERROR_EOF)) {
317 		kprintf("hammer2_bulkfree: Encountered errors (%08x) "
318 			"while scanning \"%s\"\n",
319 			error, parent->data->ipdata.filename);
320 	}
321 
322 	/*
323 	 * Save with higher pri now that we know what it is.
324 	 */
325 	h2_bulkfree_test(info, &parent->bref, info->pri + 1,
326 			 (error & ~HAMMER2_ERROR_EOF));
327 
328 done:
329 	hammer2_chain_unlock(parent);
330 
331 	return (error & ~HAMMER2_ERROR_EOF);
332 }
333 
334 /*
335  * Bulkfree algorithm
336  *
337  * Repeat {
338  *	Chain flush (partial synchronization) XXX removed
339  *	Scan the whole topology - build in-memory freemap (mark 11)
340  *	Reconcile the in-memory freemap against the on-disk freemap.
341  *		ondisk xx -> ondisk 11 (if allocated)
342  *		ondisk 11 -> ondisk 10 (if free in-memory)
343  *		ondisk 10 -> ondisk 00 (if free in-memory) - on next pass
344  * }
345  *
346  * The topology scan may have to be performed multiple times to window
347  * freemaps which are too large to fit in kernel memory.
348  *
349  * Races are handled using a double-transition (11->10, 10->00).  The bulkfree
350  * scan snapshots the volume root's blockset and thus can run concurrent with
351  * normal operations, as long as a full flush is made between each pass to
352  * synchronize any modified chains (otherwise their blocks might be improperly
353  * freed).
354  *
355  * Temporary memory in multiples of 64KB is required to reconstruct the leaf
356  * hammer2_bmap_data blocks so they can later be compared against the live
357  * freemap.  Each 64KB block represents 128 x 16KB x 1024 = ~2 GB of storage.
358  * A 32MB save area thus represents around ~1 TB.  The temporary memory
359  * allocated can be specified.  If it is not sufficient multiple topology
360  * passes will be made.
361  */
362 
363 /*
364  * Bulkfree callback info
365  */
366 static void hammer2_bulkfree_thread(void *arg __unused);
367 static void cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size);
368 static int h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo,
369 			hammer2_blockref_t *bref);
370 static int h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo);
371 static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
372 			hammer2_off_t data_off, hammer2_bmap_data_t *live,
373 			hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base);
374 
/*
 * Create the per-device bulkfree management thread.  The thread body
 * is hammer2_bulkfree_thread(), below.
 */
void
hammer2_bulkfree_init(hammer2_dev_t *hmp)
{
	hammer2_thr_create(&hmp->bfthr, NULL, hmp,
			   hmp->devrepname, -1, -1,
			   hammer2_bulkfree_thread);
}
382 
/*
 * Tear down the per-device bulkfree management thread.
 */
void
hammer2_bulkfree_uninit(hammer2_dev_t *hmp)
{
	hammer2_thr_delete(&hmp->bfthr);
}
388 
/*
 * Bulkfree manager thread (one per hammer2 device).
 *
 * Sleeps up to 60 seconds at a time waiting for control flags and
 * services STOP/FREEZE/UNFREEZE/REMASTER requests.  The REMASTER-
 * triggered bulkfree pass is currently disabled (the call is
 * commented out below).
 */
static void
hammer2_bulkfree_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_ioc_bulkfree_t bfi;
	uint32_t flags;

	for (;;) {
		hammer2_thr_wait_any(thr,
				     HAMMER2_THREAD_STOP |
				     HAMMER2_THREAD_FREEZE |
				     HAMMER2_THREAD_UNFREEZE |
				     HAMMER2_THREAD_REMASTER,
				     hz * 60);

		/* snapshot flags; compiler fence prevents a re-fetch */
		flags = thr->flags;
		cpu_ccfence();
		if (flags & HAMMER2_THREAD_STOP)
			break;
		if (flags & HAMMER2_THREAD_FREEZE) {
			/* acknowledge freeze: set FROZEN, clear FREEZE */
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}
		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}
		if (flags & HAMMER2_THREAD_FROZEN)
			continue;
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* set up an 8MB pass; actual invocation disabled */
			bzero(&bfi, sizeof(bfi));
			bfi.size = 8192 * 1024;
			/* hammer2_bulkfree_pass(thr->hmp, &bfi); */
		}
	}
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* structure can go invalid at this point */
}
432 
/*
 * Execute a bulkfree pass.  The topology under (vchain) is scanned in
 * windows sized by the caller-supplied buffer (bfi->size), rebuilding
 * the freemap in memory and then reconciling it against the live
 * freemap on media.  Returns a HAMMER2_ERROR_* mask.
 */
int
hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
		      hammer2_ioc_bulkfree_t *bfi)
{
	hammer2_bulkfree_info_t cbinfo;
	hammer2_chain_save_t *save;
	hammer2_off_t incr;
	size_t size;
	int error;

	/*
	 * We have to clear the live dedup cache as it might have entries
	 * that are freeable as of now.  Any new entries in the dedup cache
	 * made after this point, even if they become freeable, will have
	 * previously been fully allocated and will be protected by the
	 * 2-stage bulkfree.
	 */
	hammer2_dedup_clear(hmp);

	/*
	 * Setup for free pass using the buffer size specified by the
	 * hammer2 utility, 32K-aligned.
	 */
	bzero(&cbinfo, sizeof(cbinfo));
	size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
	       ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);

	/*
	 * Cap at 1/4 physical memory (hammer2 utility will not normally
	 * ever specify a buffer this big, but leave the option available).
	 */
	if (size > kmem_lim_size() * 1024 * 1024 / 4) {
		size = kmem_lim_size() * 1024 * 1024 / 4;
		kprintf("hammer2: Warning: capping bulkfree buffer at %jdM\n",
			(intmax_t)size / (1024 * 1024));
	}

#define HAMMER2_FREEMAP_SIZEDIV	\
	(HAMMER2_FREEMAP_LEVEL1_SIZE / HAMMER2_FREEMAP_LEVELN_PSIZE)
#define HAMMER2_FREEMAP_SIZEMASK	(HAMMER2_FREEMAP_SIZEDIV - 1)

	/*
	 * Cap at the size needed to cover the whole volume to avoid
	 * making an unnecessarily large allocation.
	 */
	if (size > hmp->total_size / HAMMER2_FREEMAP_SIZEDIV) {
		size = (hmp->total_size + HAMMER2_FREEMAP_SIZEMASK) /
			HAMMER2_FREEMAP_SIZEDIV;
	}

	/*
	 * Minimum bitmap buffer size, then align to a LEVELN_PSIZE (32K)
	 * boundary.
	 */
	if (size < 1024 * 1024)
		size = 1024 * 1024;
	size = (size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
	       ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);

	cbinfo.hmp = hmp;
	cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER);
	cbinfo.saved_mirror_tid = hmp->voldata.mirror_tid;

	cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE,
			       M_HAMMER2, M_WAITOK | M_ZERO);

	kprintf("hammer2: bulkfree buf=%jdM\n",
		(intmax_t)size / (1024 * 1024));

	/*
	 * Normalize start point to a 2GB boundary.  We operate on a
	 * 64KB leaf bitmap boundary which represents 2GB of storage.
	 */
	cbinfo.sbase = bfi->sbase;
	if (cbinfo.sbase > hmp->total_size)
		cbinfo.sbase = hmp->total_size;
	cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
	TAILQ_INIT(&cbinfo.list);

	cbinfo.bulkfree_ticks = ticks;

	/*
	 * Loop on a full meta-data scan as many times as required to
	 * get through all available storage.
	 */
	error = 0;
	while (cbinfo.sbase < hmp->total_size) {
		/*
		 * We have enough ram to represent (incr) bytes of storage.
		 * Each 64KB of ram represents 2GB of storage.
		 *
		 * We must also clean out our de-duplication heuristic for
		 * each (incr) bytes of storage, otherwise we wind up not
		 * scanning meta-data for later areas of storage because
		 * they had already been scanned in earlier areas of storage.
		 * Since the ranging is different, we have to restart
		 * the dedup heuristic too.
		 */
		int allmedia;

		cbinfo_bmap_init(&cbinfo, size);
		bzero(cbinfo.dedup, sizeof(*cbinfo.dedup) *
				    HAMMER2_DEDUP_HEUR_SIZE);
		cbinfo.count_inodes_scanned = 0;
		cbinfo.count_dirents_scanned = 0;
		cbinfo.count_bytes_scanned = 0;
		cbinfo.count_chains_scanned = 0;
		cbinfo.count_chains_reported = 0;

		incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE *
		       HAMMER2_FREEMAP_LEVEL1_SIZE;
		if (hmp->total_size - cbinfo.sbase <= incr) {
			cbinfo.sstop = hmp->total_size;
			allmedia = 1;
		} else {
			cbinfo.sstop = cbinfo.sbase + incr;
			allmedia = 0;
		}
		kprintf("hammer2: pass %016jx-%016jx ",
			(intmax_t)cbinfo.sbase,
			(intmax_t)cbinfo.sstop);
		if (allmedia && cbinfo.sbase == 0)
			kprintf("(all media)\n");
		else if (allmedia)
			kprintf("(remaining media)\n");
		else
			kprintf("(%jdGB of media)\n",
				(intmax_t)incr / (1024L*1024*1024));

		/*
		 * Scan topology for stuff inside this range.
		 *
		 * NOTE - By not using a transaction the operation can
		 *	  run concurrent with the frontend as well as
		 *	  with flushes.
		 *
		 *	  We cannot safely set a mtid without a transaction,
		 *	  and in fact we don't want to set one anyway.  We
		 *	  want the bulkfree to be passive and not interfere
		 *	  with crash recovery.
		 */
#undef HAMMER2_BULKFREE_TRANS	/* undef - don't use transaction */
#ifdef HAMMER2_BULKFREE_TRANS
		hammer2_trans_init(hmp->spmp, 0);
		cbinfo.mtid = hammer2_trans_sub(hmp->spmp);
#else
		cbinfo.mtid = 0;
#endif
		cbinfo.pri = 0;
		error |= hammer2_bulk_scan(vchain,
					   h2_bulkfree_callback, &cbinfo);

		/*
		 * Drain chains deferred by the scan, stopping early on a
		 * hard (non-CHECK) error.
		 */
		while ((save = TAILQ_FIRST(&cbinfo.list)) != NULL &&
		       (error & ~HAMMER2_ERROR_CHECK) == 0) {
			TAILQ_REMOVE(&cbinfo.list, save, entry);
			cbinfo.pri = 0;
			error |= hammer2_bulk_scan(save->chain,
						   h2_bulkfree_callback,
						   &cbinfo);
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
		}

		/*
		 * Clean out any deferred chains remaining after an
		 * early abort.
		 */
		while (save) {
			TAILQ_REMOVE(&cbinfo.list, save, entry);
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
			save = TAILQ_FIRST(&cbinfo.list);
		}

		/*
		 * If the complete scan succeeded we can synchronize our
		 * in-memory freemap against live storage.  If an abort
		 * occurred we cannot safely synchronize our partially
		 * filled-out in-memory freemap.
		 *
		 * We still synchronize on CHECK failures.  That is, we still
		 * want bulkfree to operate even if the filesystem has defects.
		 */
		if (error & ~HAMMER2_ERROR_CHECK) {
			kprintf("bulkfree lastdrop %d %d error=0x%04x\n",
				vchain->refs, vchain->core.chain_count, error);
		} else {
			if (error & HAMMER2_ERROR_CHECK) {
				kprintf("bulkfree lastdrop %d %d "
					"(with check errors)\n",
					vchain->refs, vchain->core.chain_count);
			} else {
				kprintf("bulkfree lastdrop %d %d\n",
					vchain->refs, vchain->core.chain_count);
			}

			error = h2_bulkfree_sync(&cbinfo);

			hammer2_voldata_lock(hmp);
			hammer2_voldata_modify(hmp);
			hmp->voldata.allocator_free += cbinfo.adj_free;
			hammer2_voldata_unlock(hmp);
		}

		/*
		 * Cleanup for next loop.
		 */
#ifdef HAMMER2_BULKFREE_TRANS
		hammer2_trans_done(hmp->spmp, 0);
#endif
		if (error & ~HAMMER2_ERROR_CHECK)
			break;
		cbinfo.sbase = cbinfo.sstop;
		cbinfo.adj_free = 0;
	}
	kmem_free_swapbacked(&cbinfo.kp);
	kfree(cbinfo.dedup, M_HAMMER2);
	cbinfo.dedup = NULL;

	bfi->sstop = cbinfo.sbase;

	/*
	 * Convert the processed fraction to basis points (1/100 of a
	 * percent) for the statistics report.
	 */
	incr = bfi->sstop / (hmp->total_size / 10000);
	if (incr > 10000)
		incr = 10000;

	kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n",
		(int)incr / 100,
		(int)incr % 100);

	if (error & ~HAMMER2_ERROR_CHECK) {
		kprintf("    bulkfree was aborted\n");
	} else {
		if (error & HAMMER2_ERROR_CHECK) {
			kprintf("    WARNING: bulkfree "
				"encountered CRC errors\n");
		}
		kprintf("    transition->free   %ld\n", cbinfo.count_10_00);
		kprintf("    transition->staged %ld\n", cbinfo.count_11_10);
		kprintf("    ERR(00)->allocated %ld\n", cbinfo.count_00_11);
		kprintf("    ERR(01)->allocated %ld\n", cbinfo.count_01_11);
		kprintf("    staged->allocated  %ld\n", cbinfo.count_10_11);
		kprintf("    ~2MB segs cleaned  %ld\n", cbinfo.count_l0cleans);
		kprintf("    linear adjusts     %ld\n",
			cbinfo.count_linadjusts);
		kprintf("    dedup factor       %ld\n",
			cbinfo.count_dedup_factor);
	}

	return error;
}
678 
679 static void
680 cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size)
681 {
682 	hammer2_bmap_data_t *bmap = cbinfo->bmap;
683 	hammer2_key_t key = cbinfo->sbase;
684 	hammer2_key_t lokey;
685 	hammer2_key_t hikey;
686 
687 	lokey = (cbinfo->hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
688 		~HAMMER2_SEGMASK64;
689 	hikey = cbinfo->hmp->total_size & ~HAMMER2_SEGMASK64;
690 
691 	bzero(bmap, size);
692 	while (size) {
693 		bzero(bmap, sizeof(*bmap));
694 		if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX))
695 			lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX);
696 		if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64)
697 			lokey = H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64;
698 		if (key < lokey || key >= hikey) {
699 			memset(bmap->bitmapq, -1,
700 			       sizeof(bmap->bitmapq));
701 			bmap->avail = 0;
702 			bmap->linear = HAMMER2_SEGSIZE;
703 		} else {
704 			bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
705 		}
706 		size -= sizeof(*bmap);
707 		key += HAMMER2_FREEMAP_LEVEL0_SIZE;
708 		++bmap;
709 	}
710 }
711 
/*
 * Bulk scan callback.  Marks the storage referenced by *bref as
 * allocated (bitmap code 11) in the in-memory freemap, if it falls
 * within the scan window [sbase, sstop).
 *
 * Returns 0 normally, HAMMER2_ERROR_ABORTED if a signal was caught.
 */
static int
h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref)
{
	hammer2_bmap_data_t *bmap;
	hammer2_off_t data_off;
	uint16_t class;
	size_t bytes;
	int radix;

	/*
	 * Check for signal and allow yield to userland during scan.
	 */
	if (hammer2_signal_check(&cbinfo->save_time))
		return HAMMER2_ERROR_ABORTED;

	/*
	 * Deal with kernel thread cpu or I/O hogging by limiting the
	 * number of chains scanned per second to hammer2_bulkfree_tps.
	 * Ignore leaf records (DIRENT and DATA), no per-record I/O is
	 * involved for those since we don't load their data.
	 */
	if (bref->type != HAMMER2_BREF_TYPE_DATA &&
	    bref->type != HAMMER2_BREF_TYPE_DIRENT) {
		++cbinfo->bulkfree_calls;
		if (cbinfo->bulkfree_calls > hammer2_bulkfree_tps) {
			int dticks = ticks - cbinfo->bulkfree_ticks;
			if (dticks < 0)
				dticks = 0;
			if (dticks < hz) {
				/* sleep off the remainder of the second */
				tsleep(&cbinfo->bulkfree_ticks, 0,
				       "h2bw", hz - dticks);
			}
			cbinfo->bulkfree_calls = 0;
			cbinfo->bulkfree_ticks = ticks;
		}
	}

	/*
	 * Calculate the data offset and determine if it is within
	 * the current freemap range being gathered.
	 */
	data_off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	if (data_off < cbinfo->sbase || data_off >= cbinfo->sstop)
		return 0;
	if (data_off < cbinfo->hmp->voldata.allocator_beg)
		return 0;
	if (data_off >= cbinfo->hmp->total_size)
		return 0;

	/*
	 * Calculate the information needed to generate the in-memory
	 * freemap record.
	 *
	 * Hammer2 does not allow allocations to cross the L1 (2GB) boundary,
	 * it's a problem if it does.  (Or L0 for that matter).
	 */
	radix = (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
	KKASSERT(radix != 0);
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;

	if (data_off + bytes > cbinfo->sstop) {
		kprintf("hammer2_bulkfree_scan: illegal 2GB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = cbinfo->sstop - data_off;	/* XXX */
	}

	/*
	 * Convert to a storage offset relative to the beginning of the
	 * storage range we are collecting.  Then lookup the level0 bmap entry.
	 */
	data_off -= cbinfo->sbase;
	bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);

	/*
	 * Convert data_off to a bmap-relative value (~4MB storage range).
	 * Adjust linear, class, and avail.
	 *
	 * Hammer2 does not allow allocations to cross the L0 (4MB) boundary,
	 */
	data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
	if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
		kprintf("hammer2_bulkfree_scan: illegal 4MB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
	}

	/* first reference to this bmap: claim the class and full avail */
	if (bmap->class == 0) {
		bmap->class = class;
		bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
	}

	/*
	 * NOTE: bmap->class does not have to match class.  Classification
	 *	 is relaxed when free space is low, so some mixing can occur.
	 */
#if 0
	/*
	 * XXX removed
	 */
	if (bmap->class != class) {
		kprintf("hammer2_bulkfree_scan: illegal mixed class "
			"%016jx %016jx/%d (%04x vs %04x)\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits,
			class, bmap->class);
	}
#endif

	/*
	 * Just record the highest byte-granular offset for now.  Do not
	 * match against allocations which are in multiples of whole blocks.
	 *
	 * Make sure that any in-block linear offset at least covers the
	 * data range.  This can cause bmap->linear to become block-aligned.
	 */
	if (bytes & HAMMER2_FREEMAP_BLOCK_MASK) {
		if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
			bmap->linear = (int32_t)data_off + (int32_t)bytes;
	} else if (bmap->linear >= (int32_t)data_off &&
		   bmap->linear < (int32_t)data_off + (int32_t)bytes) {
		bmap->linear = (int32_t)data_off + (int32_t)bytes;
	}

	/*
	 * Adjust the hammer2_bitmap_t bitmap[HAMMER2_BMAP_ELEMENTS].
	 * 64-bit entries, 2 bits per entry, to code 11.
	 *
	 * NOTE: The mask and shift amounts come from the FREEMAP_BLOCK
	 *	 and BMAP_INDEX macros; the shift is doubled because each
	 *	 block consumes 2 bits.
	 *
	 * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE.
	 *	 Also, data_off may not be FREEMAP_BLOCK_SIZE aligned.
	 */
	while (bytes > 0) {
		hammer2_bitmap_t bmask;
		int bindex;

		bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
					   HAMMER2_BMAP_INDEX_RADIX);
		bmask = (hammer2_bitmap_t)3 <<
			((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
			 HAMMER2_FREEMAP_BLOCK_RADIX) << 1);

		/*
		 * NOTE! The (avail) calculation is bitmap-granular.  Multiple
		 *	 sub-granular records can wind up at the same bitmap
		 *	 position.
		 */
		if ((bmap->bitmapq[bindex] & bmask) == 0) {
			if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) {
				bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE;
			} else {
				bmap->avail -= bytes;
			}
			bmap->bitmapq[bindex] |= bmask;
		}
		data_off += HAMMER2_FREEMAP_BLOCK_SIZE;
		if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE)
			bytes = 0;
		else
			bytes -= HAMMER2_FREEMAP_BLOCK_SIZE;
	}
	return 0;
}
884 
885 /*
886  * Synchronize the in-memory bitmap with the live freemap.  This is not a
887  * direct copy.  Instead the bitmaps must be compared:
888  *
889  *	In-memory	Live-freemap
890  *	   00		  11 -> 10	(do nothing if live modified)
891  *			  10 -> 00	(do nothing if live modified)
892  *	   11		  10 -> 11	handles race against live
893  *			  ** -> 11	nominally warn of corruption
894  *
895  * We must also fixup the hints in HAMMER2_BREF_TYPE_FREEMAP_LEAF.
896  */
897 static int
898 h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo)
899 {
900 	hammer2_off_t data_off;
901 	hammer2_key_t key;
902 	hammer2_key_t key_dummy;
903 	hammer2_bmap_data_t *bmap;
904 	hammer2_bmap_data_t *live;
905 	hammer2_chain_t *live_parent;
906 	hammer2_chain_t *live_chain;
907 	int bmapindex;
908 	int error;
909 
910 	kprintf("hammer2_bulkfree - range ");
911 
912 	if (cbinfo->sbase < cbinfo->hmp->voldata.allocator_beg)
913 		kprintf("%016jx-",
914 			(intmax_t)cbinfo->hmp->voldata.allocator_beg);
915 	else
916 		kprintf("%016jx-",
917 			(intmax_t)cbinfo->sbase);
918 
919 	if (cbinfo->sstop > cbinfo->hmp->total_size)
920 		kprintf("%016jx\n",
921 			(intmax_t)cbinfo->hmp->total_size);
922 	else
923 		kprintf("%016jx\n",
924 			(intmax_t)cbinfo->sstop);
925 
926 	data_off = cbinfo->sbase;
927 	bmap = cbinfo->bmap;
928 
929 	live_parent = &cbinfo->hmp->fchain;
930 	hammer2_chain_ref(live_parent);
931 	hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS);
932 	live_chain = NULL;
933 	error = 0;
934 
935 	/*
936 	 * Iterate each hammer2_bmap_data_t line (128 bytes) managing
937 	 * 4MB of storage.
938 	 */
939 	while (data_off < cbinfo->sstop) {
940 		/*
941 		 * The freemap is not used below allocator_beg or beyond
942 		 * total_size.
943 		 */
944 
945 		if (data_off < cbinfo->hmp->voldata.allocator_beg)
946 			goto next;
947 		if (data_off >= cbinfo->hmp->total_size)
948 			goto next;
949 
950 		/*
951 		 * Locate the freemap leaf on the live filesystem
952 		 */
953 		key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK);
954 
955 		if (live_chain == NULL || live_chain->bref.key != key) {
956 			if (live_chain) {
957 				hammer2_chain_unlock(live_chain);
958 				hammer2_chain_drop(live_chain);
959 			}
960 			live_chain = hammer2_chain_lookup(
961 					    &live_parent,
962 					    &key_dummy,
963 					    key,
964 					    key + HAMMER2_FREEMAP_LEVEL1_MASK,
965 					    &error,
966 					    HAMMER2_LOOKUP_ALWAYS);
967 			if (error) {
968 				kprintf("hammer2_bulkfree: freemap lookup "
969 					"error near %016jx, error %s\n",
970 					(intmax_t)data_off,
971 					hammer2_error_str(live_chain->error));
972 				break;
973 			}
974 		}
975 		if (live_chain == NULL) {
976 			/*
977 			 * XXX if we implement a full recovery mode we need
978 			 * to create/recreate missing freemap chains if our
979 			 * bmap has any allocated blocks.
980 			 */
981 			if (bmap->class &&
982 			    bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) {
983 				kprintf("hammer2_bulkfree: cannot locate "
984 					"live leaf for allocated data "
985 					"near %016jx\n",
986 					(intmax_t)data_off);
987 			}
988 			goto next;
989 		}
990 		if (live_chain->error) {
991 			kprintf("hammer2_bulkfree: unable to access freemap "
992 				"near %016jx, error %s\n",
993 				(intmax_t)data_off,
994 				hammer2_error_str(live_chain->error));
995 			hammer2_chain_unlock(live_chain);
996 			hammer2_chain_drop(live_chain);
997 			live_chain = NULL;
998 			goto next;
999 		}
1000 
1001 		bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >>
1002 			    HAMMER2_FREEMAP_LEVEL0_RADIX;
1003 		live = &live_chain->data->bmdata[bmapindex];
1004 
1005 		/*
1006 		 * Shortcut if the bitmaps match and the live linear
1007 		 * indicator is sane.  We can't do a perfect check of
1008 		 * live->linear because the only real requirement is that
1009 		 * if it is not block-aligned, that it not cover the space
1010 		 * within its current block which overlaps one of the data
1011 		 * ranges we scan.  We don't retain enough fine-grained
1012 		 * data in our scan to be able to set it exactly.
1013 		 *
1014 		 * TODO - we could shortcut this by testing that both
1015 		 * live->class and bmap->class are 0, and both avails are
1016 		 * set to HAMMER2_FREEMAP_LEVEL0_SIZE (4MB).
1017 		 */
1018 		if (bcmp(live->bitmapq, bmap->bitmapq,
1019 			 sizeof(bmap->bitmapq)) == 0 &&
1020 		    live->linear >= bmap->linear) {
1021 			goto next;
1022 		}
1023 		if (hammer2_debug & 1) {
1024 			kprintf("live %016jx %04d.%04x (avail=%d)\n",
1025 				data_off, bmapindex, live->class, live->avail);
1026 		}
1027 
1028 		hammer2_chain_modify(live_chain, cbinfo->mtid, 0, 0);
1029 		live_chain->bref.check.freemap.bigmask = -1;
1030 		cbinfo->hmp->freemap_relaxed = 0;	/* reset heuristic */
1031 		live = &live_chain->data->bmdata[bmapindex];
1032 
1033 		h2_bulkfree_sync_adjust(cbinfo, data_off, live, bmap,
1034 					live_chain->bref.key +
1035 					bmapindex *
1036 					HAMMER2_FREEMAP_LEVEL0_SIZE);
1037 next:
1038 		data_off += HAMMER2_FREEMAP_LEVEL0_SIZE;
1039 		++bmap;
1040 	}
1041 	if (live_chain) {
1042 		hammer2_chain_unlock(live_chain);
1043 		hammer2_chain_drop(live_chain);
1044 	}
1045 	if (live_parent) {
1046 		hammer2_chain_unlock(live_parent);
1047 		hammer2_chain_drop(live_parent);
1048 	}
1049 	return error;
1050 }
1051 
1052 /*
1053  * Merge the bulkfree bitmap against the existing bitmap.
1054  */
1055 static
1056 void
1057 h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
1058 			hammer2_off_t data_off, hammer2_bmap_data_t *live,
1059 			hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base)
1060 {
1061 	int bindex;
1062 	int scount;
1063 	hammer2_off_t tmp_off;
1064 	hammer2_bitmap_t lmask;
1065 	hammer2_bitmap_t mmask;
1066 
1067 	tmp_off = data_off;
1068 
1069 	for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
1070 		lmask = live->bitmapq[bindex];	/* live */
1071 		mmask = bmap->bitmapq[bindex];	/* snapshotted bulkfree */
1072 		if (lmask == mmask) {
1073 			tmp_off += HAMMER2_BMAP_INDEX_SIZE;
1074 			continue;
1075 		}
1076 
1077 		for (scount = 0;
1078 		     scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
1079 		     scount += 2) {
1080 			if ((mmask & 3) == 0) {
1081 				/*
1082 				 * in-memory 00		live 11 -> 10
1083 				 *			live 10 -> 00
1084 				 *
1085 				 * Storage might be marked allocated or
1086 				 * staged and must be remarked staged or
1087 				 * free.
1088 				 */
1089 				switch (lmask & 3) {
1090 				case 0:	/* 00 */
1091 					break;
1092 				case 1:	/* 01 */
1093 					kprintf("hammer2_bulkfree: cannot "
1094 						"transition m=00/l=01\n");
1095 					break;
1096 				case 2:	/* 10 -> 00 */
1097 					live->bitmapq[bindex] &=
1098 					    ~((hammer2_bitmap_t)2 << scount);
1099 					live->avail +=
1100 						HAMMER2_FREEMAP_BLOCK_SIZE;
1101 					if (live->avail >
1102 					    HAMMER2_FREEMAP_LEVEL0_SIZE) {
1103 						live->avail =
1104 						    HAMMER2_FREEMAP_LEVEL0_SIZE;
1105 					}
1106 					cbinfo->adj_free +=
1107 						HAMMER2_FREEMAP_BLOCK_SIZE;
1108 					++cbinfo->count_10_00;
1109 					hammer2_io_dedup_assert(
1110 						cbinfo->hmp,
1111 						tmp_off |
1112 						HAMMER2_FREEMAP_BLOCK_RADIX,
1113 						HAMMER2_FREEMAP_BLOCK_SIZE);
1114 					break;
1115 				case 3:	/* 11 -> 10 */
1116 					live->bitmapq[bindex] &=
1117 					    ~((hammer2_bitmap_t)1 << scount);
1118 					++cbinfo->count_11_10;
1119 					hammer2_io_dedup_delete(
1120 						cbinfo->hmp,
1121 						HAMMER2_BREF_TYPE_DATA,
1122 						tmp_off |
1123 						HAMMER2_FREEMAP_BLOCK_RADIX,
1124 						HAMMER2_FREEMAP_BLOCK_SIZE);
1125 					break;
1126 				}
1127 			} else if ((mmask & 3) == 3) {
1128 				/*
1129 				 * in-memory 11		live 10 -> 11
1130 				 *			live ** -> 11
1131 				 *
1132 				 * Storage might be incorrectly marked free
1133 				 * or staged and must be remarked fully
1134 				 * allocated.
1135 				 */
1136 				switch (lmask & 3) {
1137 				case 0:	/* 00 */
1138 					++cbinfo->count_00_11;
1139 					cbinfo->adj_free -=
1140 						HAMMER2_FREEMAP_BLOCK_SIZE;
1141 					live->avail -=
1142 						HAMMER2_FREEMAP_BLOCK_SIZE;
1143 					if ((int32_t)live->avail < 0)
1144 						live->avail = 0;
1145 					break;
1146 				case 1:	/* 01 */
1147 					++cbinfo->count_01_11;
1148 					break;
1149 				case 2:	/* 10 -> 11 */
1150 					++cbinfo->count_10_11;
1151 					break;
1152 				case 3:	/* 11 */
1153 					break;
1154 				}
1155 				live->bitmapq[bindex] |=
1156 					((hammer2_bitmap_t)3 << scount);
1157 			}
1158 			mmask >>= 2;
1159 			lmask >>= 2;
1160 			tmp_off += HAMMER2_FREEMAP_BLOCK_SIZE;
1161 		}
1162 	}
1163 
1164 	/*
1165 	 * Determine if the live bitmap is completely free and reset its
1166 	 * fields if so.  Otherwise check to see if we can reduce the linear
1167 	 * offset.
1168 	 */
1169 	for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
1170 		if (live->bitmapq[bindex] != 0)
1171 			break;
1172 	}
1173 	if (bindex < 0) {
1174 		/*
1175 		 * Completely empty, reset entire segment
1176 		 */
1177 #if 0
1178 		kprintf("hammer2: cleanseg %016jx.%04x (%d)\n",
1179 			alloc_base, live->class, live->avail);
1180 #endif
1181 		live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
1182 		live->class = 0;
1183 		live->linear = 0;
1184 		++cbinfo->count_l0cleans;
1185 	} else if (bindex < 7) {
1186 		/*
1187 		 * Partially full, bitmapq[bindex] != 0.  Our bulkfree pass
1188 		 * does not record enough information to set live->linear
1189 		 * exactly.
1190 		 *
1191 		 * NOTE: Setting live->linear to a sub-block (16K) boundary
1192 		 *	 forces the live code to iterate to the next fully
1193 		 *	 free block.  It does NOT mean that all blocks above
1194 		 *	 live->linear are available.
1195 		 *
1196 		 *	 Setting live->linear to a fragmentary (less than
1197 		 *	 16K) boundary allows allocations to iterate within
1198 		 *	 that sub-block.
1199 		 */
1200 		if (live->linear < bmap->linear &&
1201 		    ((live->linear ^ bmap->linear) &
1202 		     ~HAMMER2_FREEMAP_BLOCK_MASK) == 0) {
1203 			/*
1204 			 * If greater than but still within the same
1205 			 * sub-block as live we can adjust linear upward.
1206 			 */
1207 			live->linear = bmap->linear;
1208 			++cbinfo->count_linadjusts;
1209 		} else {
1210 			/*
1211 			 * Otherwise adjust to the nearest higher or same
1212 			 * sub-block boundary.  The live system may have
1213 			 * bounced live->linear around so we cannot make any
1214 			 * assumptions with regards to available fragmentary
1215 			 * allocations.
1216 			 */
1217 			live->linear =
1218 				(bmap->linear + HAMMER2_FREEMAP_BLOCK_MASK) &
1219 				~HAMMER2_FREEMAP_BLOCK_MASK;
1220 			++cbinfo->count_linadjusts;
1221 		}
1222 	} else {
1223 		/*
1224 		 * Completely full, effectively disable the linear iterator
1225 		 */
1226 		live->linear = HAMMER2_SEGSIZE;
1227 	}
1228 
1229 #if 0
1230 	if (bmap->class) {
1231 		kprintf("%016jx %04d.%04x (avail=%7d) "
1232 			"%08x %08x %08x %08x %08x %08x %08x %08x\n",
1233 			(intmax_t)data_off,
1234 			(int)((data_off &
1235 			       HAMMER2_FREEMAP_LEVEL1_MASK) >>
1236 			      HAMMER2_FREEMAP_LEVEL0_RADIX),
1237 			bmap->class,
1238 			bmap->avail,
1239 			bmap->bitmap[0], bmap->bitmap[1],
1240 			bmap->bitmap[2], bmap->bitmap[3],
1241 			bmap->bitmap[4], bmap->bitmap[5],
1242 			bmap->bitmap[6], bmap->bitmap[7]);
1243 	}
1244 #endif
1245 }
1246 
1247 /*
1248  * BULKFREE DEDUP HEURISTIC
1249  *
1250  * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1251  *	    All fields must be loaded into locals and validated.
1252  */
1253 static
1254 int
1255 h2_bulkfree_test(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref,
1256 		 int pri, int saved_error)
1257 {
1258 	hammer2_dedup_t *dedup;
1259 	int best;
1260 	int n;
1261 	int i;
1262 
1263 	n = hammer2_icrc32(&bref->data_off, sizeof(bref->data_off));
1264 	dedup = cbinfo->dedup + (n & (HAMMER2_DEDUP_HEUR_MASK & ~7));
1265 
1266 	for (i = best = 0; i < 8; ++i) {
1267 		if (dedup[i].data_off == bref->data_off) {
1268 			if (dedup[i].ticks < pri)
1269 				dedup[i].ticks = pri;
1270 			if (pri == 1)
1271 				cbinfo->count_dedup_factor += dedup[i].ticks;
1272 			return (dedup[i].saved_error | HAMMER2_ERROR_EOF);
1273 		}
1274 		if (dedup[i].ticks < dedup[best].ticks)
1275 			best = i;
1276 	}
1277 	dedup[best].data_off = bref->data_off;
1278 	dedup[best].ticks = pri;
1279 	dedup[best].saved_error = saved_error;
1280 
1281 	return 0;
1282 }
1283