/*
 * Copyright (c) 2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include "hammer2.h"

/*
 * breadth-first search
 */
typedef struct hammer2_chain_save {
	TAILQ_ENTRY(hammer2_chain_save)	entry;
	hammer2_chain_t	*chain;
} hammer2_chain_save_t;

TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;

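/*
 * Each 2-bit field in the freemap codes the state of one block:
 * 00 = free, 10 = staged (possibly free), and 11 = allocated; the 01
 * code is not normally used.  The count_* fields below tally the state
 * transitions made when the in-memory freemap built by the scan is
 * reconciled against the live freemap.
 */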
typedef struct hammer2_bulkfree_info {
	hammer2_dev_t		*hmp;
	kmem_anon_desc_t	kp;
	hammer2_off_t		sbase;		/* sub-loop iteration */
	hammer2_off_t		sstop;
	hammer2_bmap_data_t	*bmap;
	int			depth;
	long			count_10_00;	/* staged->free	     */
	long			count_11_10;	/* allocated->staged */
	long			count_00_11;	/* (should not happen) */
	long			count_01_11;	/* (should not happen) */
	long			count_10_11;	/* staged->allocated */
	long			count_l0cleans;
	long			count_linadjusts;
	long			count_inodes_scanned;
	long			count_dirents_scanned;
	long			count_dedup_factor;
	long			count_bytes_scanned;
	long			count_chains_scanned;
	long			count_chains_reported;
	long			bulkfree_calls;
	int			bulkfree_ticks;
	int			list_alert;
	hammer2_off_t		adj_free;
	hammer2_tid_t		mtid;
	time_t			save_time;
	hammer2_chain_save_list_t list;
	long			list_count;
	long			list_count_max;
	hammer2_chain_save_t	*backout;	/* ins pt while backing out */
	hammer2_dedup_t		*dedup;
	int			pri;
} hammer2_bulkfree_info_t;

static int h2_bulkfree_test(hammer2_bulkfree_info_t *info,
			hammer2_blockref_t *bref, int pri, int saved_error);
static uint32_t bigmask_get(hammer2_bmap_data_t *bmap);
static int bigmask_good(hammer2_bmap_data_t *bmap, uint32_t live_bigmask);

/*
 * General bulk scan function with callback.  Called with a referenced
 * but UNLOCKED parent.  The parent is returned in the same state.
 */
static
int
hammer2_bulkfree_scan(hammer2_chain_t *parent,
		  int (*func)(hammer2_bulkfree_info_t *info,
			      hammer2_blockref_t *bref),
		  hammer2_bulkfree_info_t *info)
{
	hammer2_blockref_t bref;
	hammer2_chain_t *chain;
	hammer2_chain_save_t *tail;
	hammer2_chain_save_t *save;
	int first = 1;
	int rup_error;
	int error;
	int e2;

	++info->pri;

	chain = NULL;
	rup_error = 0;
	error = 0;

	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
				   HAMMER2_RESOLVE_SHARED);

	/*
	 * Remember the current head of the saved-chain list.  If the
	 * parent is a PFS root, the re-run loop at the bottom of this
	 * function uses it to know when this PFS's deferred elements
	 * have been exhausted.
	 */
	tail = TAILQ_FIRST(&info->list);

	/*
	 * The parent was previously retrieved NODATA and thus has not
	 * tested the CRC.  Now that we have locked it normally, check
	 * for a CRC problem and skip it if we found one.  The bulk scan
	 * cannot safely traverse invalid block tables (we could end up
	 * in an endless loop or cause a panic).
	 */
	if (parent->error & HAMMER2_ERROR_CHECK) {
		error = parent->error;
		goto done;
	}

	/*
	 * Report which PFS is being scanned
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
		kprintf("hammer2_bulkfree: Scanning %s\n",
			parent->data->ipdata.filename);
	}

	/*
	 * Generally loop on the contents if we have not been flagged
	 * for abort.
	 *
	 * Remember that these chains are completely isolated from
	 * the frontend, so we can release locks temporarily without
	 * imploding.
	 */
	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref, &first,
					    HAMMER2_LOOKUP_NODATA |
					    HAMMER2_LOOKUP_SHARED);

		/*
		 * Handle EOF or other error at the current level.  This
		 * stops the bulkfree scan.
		 */
		if (error & ~HAMMER2_ERROR_CHECK)
			break;

		/*
		 * Account for dirents before the data_off test, since most
		 * dirents do not need a data reference.
		 */
		if (bref.type == HAMMER2_BREF_TYPE_DIRENT)
			++info->count_dirents_scanned;

		/*
		 * Ignore brefs without data (typically dirents)
		 */
		if ((bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0)
			continue;

		/*
		 * Process the bref.  chain is only non-NULL if the bref
		 * might be recursable (it is possible that we sometimes get
		 * a non-NULL chain where the bref cannot be recursed).
		 *
		 * If we already ran down this tree we do not have to do it
		 * again, but we must still recover any cumulative error
		 * recorded from the time we did.
		 */
		++info->pri;
		e2 = h2_bulkfree_test(info, &bref, 1, 0);
		if (e2) {
			error |= e2 & ~HAMMER2_ERROR_EOF;
			continue;
		}

		if (bref.type == HAMMER2_BREF_TYPE_INODE)
			++info->count_inodes_scanned;

		error |= func(info, &bref);
		if (error & ~HAMMER2_ERROR_CHECK)
			break;

		/*
		 * A non-NULL chain is always returned when the bref is
		 * recursable.  A non-recursable bref may also return a
		 * non-NULL chain, but usually does not.
		 */
		if (chain == NULL)
			continue;

		info->count_bytes_scanned += chain->bytes;
		++info->count_chains_scanned;

		if (info->count_chains_scanned >=
		    info->count_chains_reported + 1000000 ||
		    (info->count_chains_scanned < 1000000 &&
		     info->count_chains_scanned >=
		     info->count_chains_reported + 100000)) {
			kprintf(" chains %-7ld inodes %-7ld "
				"dirents %-7ld bytes %5ldMB\n",
				info->count_chains_scanned,
				info->count_inodes_scanned,
				info->count_dirents_scanned,
				info->count_bytes_scanned / 1000000);
			info->count_chains_reported =
				info->count_chains_scanned;
		}

		/*
		 * Check the type and set up the depth-first scan.  The
		 * bytes actually read were accounted for above.
		 */
		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
		case HAMMER2_BREF_TYPE_FREEMAP_NODE:
		case HAMMER2_BREF_TYPE_INDIRECT:
		case HAMMER2_BREF_TYPE_VOLUME:
		case HAMMER2_BREF_TYPE_FREEMAP:
			++info->depth;
			if (chain->error & HAMMER2_ERROR_CHECK) {
				/*
				 * Cannot safely recurse chains with crc
				 * errors, even in emergency mode.
				 */
				/* NOP */
			} else if (info->depth > 16 ||
				   info->backout ||
				   (info->depth > hammer2_limit_saved_depth &&
				   info->list_count >=
				    (hammer2_limit_saved_chains >> 2)))
			{
				/*
				 * We must defer the recursion if it runs
				 * too deep or if too many saved chains are
				 * allocated.
				 *
				 * In the case of too many saved chains, we
				 * have to stop recursing ASAP to avoid an
				 * explosion of memory use since each radix
				 * level can hold 512 elements.
				 *
				 * If we had to defer at a deeper level,
				 * backout is non-NULL.  We must backout
				 * completely before resuming.
				 */
				if (info->list_count >
				     hammer2_limit_saved_chains &&
				    info->list_alert == 0)
				{
					kprintf("hammer2: during bulkfree, "
						"saved chains exceeded %ld "
						"at depth %d, "
						"backing off to less-efficient "
						"operation\n",
						hammer2_limit_saved_chains,
						info->depth);
					info->list_alert = 1;
				}

				/*
				 * Must be placed at head so pfsroot scan
				 * can exhaust saved elements for that pfs
				 * first.
				 *
				 * Must be placed at head for depth-first
				 * recovery when too many saved chains, to
				 * limit the number of chains saved during
				 * saved-chain reruns.  The worst-case excess
				 * is (maximum_depth * 512) saved chains above
				 * the threshold.
				 *
				 * The maximum_depth generally occurs in the
				 * inode index and can be fairly deep once
				 * the radix tree becomes a bit fragmented.
				 * Nominally 100M inodes would be only 4 deep,
				 * plus a maximally sized file would be another
				 * 8 deep, but with fragmentation it can wind
				 * up being a lot more.
				 *
				 * However, when backing out, we have to place
				 * all the entries in each parent node not
				 * yet processed on the list too, and because
				 * these entries are shallower they must be
				 * placed after each other in order to maintain
				 * our depth-first processing.
				 */
				save = kmalloc(sizeof(*save), M_HAMMER2,
					       M_WAITOK | M_ZERO);
				save->chain = chain;
				hammer2_chain_ref(chain);

				if (info->backout) {
					TAILQ_INSERT_AFTER(&info->list,
							   info->backout,
							   save, entry);
				} else {
					TAILQ_INSERT_HEAD(&info->list,
							  save, entry);
				}
				info->backout = save;
				++info->list_count;
				if (info->list_count_max < info->list_count)
					info->list_count_max = info->list_count;

				/* guess */
				info->pri += 10;
			} else {
				int savepri = info->pri;

				hammer2_chain_unlock(chain);
				hammer2_chain_unlock(parent);
				info->pri = 0;
				rup_error |= hammer2_bulkfree_scan(chain,
								   func, info);
				info->pri += savepri;
				hammer2_chain_lock(parent,
						   HAMMER2_RESOLVE_ALWAYS |
						   HAMMER2_RESOLVE_SHARED);
				hammer2_chain_lock(chain,
						   HAMMER2_RESOLVE_ALWAYS |
						   HAMMER2_RESOLVE_SHARED);
			}
			--info->depth;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			break;
		default:
			/* does not recurse */
			break;
		}
		if (rup_error & HAMMER2_ERROR_ABORTED)
			break;
	}
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}

	/*
	 * If this is a PFSROOT, also re-run any deferred elements
	 * added during our scan so we can report any cumulative errors
	 * for the PFS.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT)) {
		for (;;) {
			int opri;

			save = TAILQ_FIRST(&info->list);
			if (save == tail)	/* exhaust this PFS only */
				break;

			TAILQ_REMOVE(&info->list, save, entry);
			info->backout = NULL;
			--info->list_count;
			opri = info->pri;
			info->pri = 0;
			rup_error |= hammer2_bulkfree_scan(save->chain, func, info);
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
			info->pri = opri;
		}
	}

	error |= rup_error;

	/*
	 * Report which PFS the errors were encountered in.
	 */
	if (parent->bref.type == HAMMER2_BREF_TYPE_INODE &&
	    (parent->bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
	    (error & ~HAMMER2_ERROR_EOF)) {
		kprintf("hammer2_bulkfree: Encountered errors (%08x) "
			"while scanning \"%s\"\n",
			error, parent->data->ipdata.filename);
	}

	/*
	 * Save the parent bref with a higher pri now that we know its
	 * cumulative error.
	 */
	h2_bulkfree_test(info, &parent->bref, info->pri + 1,
			 (error & ~HAMMER2_ERROR_EOF));

done:
	hammer2_chain_unlock(parent);

	return (error & ~HAMMER2_ERROR_EOF);
}

/*
 * Bulkfree algorithm
 *
 * Repeat {
 *	Chain flush (partial synchronization) XXX removed
 *	Scan the whole topology - build in-memory freemap (mark 11)
 *	Reconcile the in-memory freemap against the on-disk freemap.
 *		ondisk xx -> ondisk 11 (if allocated)
 *		ondisk 11 -> ondisk 10 (if free in-memory)
 *		ondisk 10 -> ondisk 00 (if free in-memory) - on next pass
 * }
 *
 * The topology scan may have to be performed multiple times in order to
 * window a freemap that is too large to fit in kernel memory.
 *
 * Races are handled using a double-transition (11->10, 10->00).  The bulkfree
 * scan snapshots the volume root's blockset and thus can run concurrently with
 * normal operations, as long as a full flush is made between each pass to
 * synchronize any modified chains (otherwise their blocks might be improperly
 * freed).
 *
 * Temporary memory in multiples of 32KB is required to reconstruct the leaf
 * hammer2_bmap_data blocks so they can later be compared against the live
 * freemap.  Each 32KB represents 256 x 16KB x 256 = ~1 GB of storage.
 * A 32MB save area thus represents around ~1 TB.  The size of the temporary
 * memory allocation can be specified; if it is insufficient, multiple
 * topology passes will be made.
 */
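
/*
 * A worked example of the scaling above: each hammer2_bmap_data leaf is
 * 128 bytes and covers 4MB of storage, so a single 32KB chunk of the
 * save area holds 256 leaves covering 256 x 4MB = 1GB.  A 32MB save
 * area therefore windows 1024 x 1GB = ~1TB per topology pass, and
 * scanning, say, a 4TB volume with a 32MB save area takes four passes.
 */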

/*
 * Bulkfree callback info
 */
static void hammer2_bulkfree_thread(void *arg);
static void cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size);
static int h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo,
			hammer2_blockref_t *bref);
static int h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo);
static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
			hammer2_off_t data_off, hammer2_bmap_data_t *live,
			hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base);

void
hammer2_bulkfree_init(hammer2_dev_t *hmp)
{
	hammer2_thr_create(&hmp->bfthr, NULL, hmp,
			   hmp->devrepname, -1, -1,
			   hammer2_bulkfree_thread);
}

void
hammer2_bulkfree_uninit(hammer2_dev_t *hmp)
{
	hammer2_thr_delete(&hmp->bfthr);
}

static void
hammer2_bulkfree_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_ioc_bulkfree_t bfi;
	uint32_t flags;

	for (;;) {
		hammer2_thr_wait_any(thr,
				     HAMMER2_THREAD_STOP |
				     HAMMER2_THREAD_FREEZE |
				     HAMMER2_THREAD_UNFREEZE |
				     HAMMER2_THREAD_REMASTER,
				     hz * 60);

		flags = thr->flags;
		cpu_ccfence();
		if (flags & HAMMER2_THREAD_STOP)
			break;
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}
		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}
		if (flags & HAMMER2_THREAD_FROZEN)
			continue;
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			bzero(&bfi, sizeof(bfi));
			bfi.size = 8192 * 1024;
			/* hammer2_bulkfree_pass(thr->hmp, &bfi); */
		}
	}
	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* structure can go invalid at this point */
}

int
hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
		      hammer2_ioc_bulkfree_t *bfi)
{
	hammer2_bulkfree_info_t cbinfo;
	hammer2_chain_save_t *save;
	hammer2_off_t incr;
	size_t size;
	int error;

	/*
	 * We have to clear the live dedup cache as it might have entries
	 * that are freeable as of now.  Any new entries in the dedup cache
	 * made after this point, even if they become freeable, will have
	 * previously been fully allocated and will be protected by the
	 * 2-stage bulkfree.
	 */
	hammer2_dedup_clear(hmp);

	/*
	 * Setup for the free pass using the buffer size specified by the
	 * hammer2 utility, 32K-aligned.
	 */
	bzero(&cbinfo, sizeof(cbinfo));
	size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
	       ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);

	/*
	 * Cap at 1/4 physical memory (the hammer2 utility will not normally
	 * specify a buffer this big, but leave the option available).
	 */
	if (size > kmem_lim_size() * 1024 * 1024 / 4) {
		size = kmem_lim_size() * 1024 * 1024 / 4;
		kprintf("hammer2: Warning: capping bulkfree buffer at %jdM\n",
			(intmax_t)size / (1024 * 1024));
	}

#define HAMMER2_FREEMAP_SIZEDIV	\
	(HAMMER2_FREEMAP_LEVEL1_SIZE / HAMMER2_FREEMAP_LEVELN_PSIZE)
#define HAMMER2_FREEMAP_SIZEMASK	(HAMMER2_FREEMAP_SIZEDIV - 1)
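
/*
 * Illustration of the macros above, assuming the canonical constants
 * (HAMMER2_FREEMAP_LEVEL1_SIZE = 1GB, HAMMER2_FREEMAP_LEVELN_PSIZE = 32KB):
 * HAMMER2_FREEMAP_SIZEDIV is 1GB / 32KB = 32768, so covering a volume of
 * total_size bytes in a single pass needs a buffer of total_size / 32768
 * bytes, e.g. ~32MB for a 1TB volume.
 */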

	/*
	 * Cap at the size needed to cover the whole volume to avoid
	 * making an unnecessarily large allocation.
	 */
	if (size > hmp->total_size / HAMMER2_FREEMAP_SIZEDIV) {
		size = (hmp->total_size + HAMMER2_FREEMAP_SIZEMASK) /
			HAMMER2_FREEMAP_SIZEDIV;
	}

	/*
	 * Minimum bitmap buffer size, then align to a LEVELN_PSIZE (32K)
	 * boundary.
	 */
	if (size < 1024 * 1024)
		size = 1024 * 1024;
	size = (size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
	       ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);

	cbinfo.hmp = hmp;
	cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER);
	cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE,
			       M_HAMMER2, M_WAITOK | M_ZERO);

	kprintf("hammer2: bulkfree buf=%jdM\n",
		(intmax_t)size / (1024 * 1024));

	/*
	 * Normalize the start point to a 1GB boundary.  We operate on a
	 * 32KB leaf bitmap boundary which represents 1GB of storage.
	 */
	cbinfo.sbase = bfi->sbase;
	if (cbinfo.sbase > hmp->total_size)
		cbinfo.sbase = hmp->total_size;
	cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
	TAILQ_INIT(&cbinfo.list);

	cbinfo.bulkfree_ticks = ticks;

	/*
	 * Loop on a full meta-data scan as many times as required to
	 * get through all available storage.
	 */
	error = 0;
	while (cbinfo.sbase < hmp->total_size) {
		/*
		 * We have enough RAM to represent (incr) bytes of storage.
		 * Each 32KB of RAM represents 1GB of storage.
		 *
		 * We must also clear the de-duplication heuristic for
		 * each (incr) bytes of storage; otherwise meta-data for
		 * later areas of storage would be skipped because it had
		 * already been seen while scanning earlier areas.  Since
		 * each pass covers a different range, the dedup heuristic
		 * has to be restarted for each pass.
		 */
		int allmedia;

		cbinfo_bmap_init(&cbinfo, size);
		bzero(cbinfo.dedup, sizeof(*cbinfo.dedup) *
				    HAMMER2_DEDUP_HEUR_SIZE);
		cbinfo.count_inodes_scanned = 0;
		cbinfo.count_dirents_scanned = 0;
		cbinfo.count_bytes_scanned = 0;
		cbinfo.count_chains_scanned = 0;
		cbinfo.count_chains_reported = 0;

		incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE *
		       HAMMER2_FREEMAP_LEVEL1_SIZE;
		if (hmp->total_size - cbinfo.sbase <= incr) {
			cbinfo.sstop = hmp->total_size;
			allmedia = 1;
		} else {
			cbinfo.sstop = cbinfo.sbase + incr;
			allmedia = 0;
		}
		kprintf("hammer2: pass %016jx-%016jx ",
			(intmax_t)cbinfo.sbase,
			(intmax_t)cbinfo.sstop);
		if (allmedia && cbinfo.sbase == 0)
			kprintf("(all media)\n");
		else if (allmedia)
			kprintf("(remaining media)\n");
		else
			kprintf("(%jdGB of media)\n",
				(intmax_t)incr / (1024L*1024*1024));

		/*
		 * Scan the topology for stuff inside this range.
		 *
		 * NOTE - By not using a transaction the operation can
		 *	  run concurrently with the frontend as well as
		 *	  with flushes.
		 *
		 *	  We cannot safely set a mtid without a transaction,
		 *	  and in fact we don't want to set one anyway.  We
		 *	  want the bulkfree to be passive and to not
		 *	  interfere with crash recovery.
		 */
#undef HAMMER2_BULKFREE_TRANS	/* undef - don't use transaction */
#ifdef HAMMER2_BULKFREE_TRANS
		hammer2_trans_init(hmp->spmp, 0);
		cbinfo.mtid = hammer2_trans_sub(hmp->spmp);
#else
		cbinfo.mtid = 0;
#endif
		cbinfo.pri = 0;
		error |= hammer2_bulkfree_scan(vchain,
					       h2_bulkfree_callback, &cbinfo);

		while ((save = TAILQ_FIRST(&cbinfo.list)) != NULL &&
		       (error & ~HAMMER2_ERROR_CHECK) == 0) {
			TAILQ_REMOVE(&cbinfo.list, save, entry);
			--cbinfo.list_count;
			cbinfo.pri = 0;
			cbinfo.backout = NULL;
			error |= hammer2_bulkfree_scan(save->chain,
						       h2_bulkfree_callback,
						       &cbinfo);
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
		}
		while (save) {
			TAILQ_REMOVE(&cbinfo.list, save, entry);
			--cbinfo.list_count;
			hammer2_chain_drop(save->chain);
			kfree(save, M_HAMMER2);
			save = TAILQ_FIRST(&cbinfo.list);
		}
		cbinfo.backout = NULL;

		/*
		 * If the complete scan succeeded we can synchronize our
		 * in-memory freemap against live storage.  If an abort
		 * occurred we cannot safely synchronize our partially
		 * filled-out in-memory freemap.
		 *
		 * We still synchronize on CHECK failures.  That is, we still
		 * want bulkfree to operate even if the filesystem has defects.
		 */
		if (error & ~HAMMER2_ERROR_CHECK) {
			kprintf("bulkfree lastdrop %d %d error=0x%04x\n",
				vchain->refs, vchain->core.chain_count, error);
		} else {
			if (error & HAMMER2_ERROR_CHECK) {
				kprintf("bulkfree lastdrop %d %d "
					"(with check errors)\n",
					vchain->refs, vchain->core.chain_count);
			} else {
				kprintf("bulkfree lastdrop %d %d\n",
					vchain->refs, vchain->core.chain_count);
			}

			error = h2_bulkfree_sync(&cbinfo);

			hammer2_voldata_lock(hmp);
			hammer2_voldata_modify(hmp);
			hmp->voldata.allocator_free += cbinfo.adj_free;
			hammer2_voldata_unlock(hmp);
		}

		/*
		 * Cleanup for the next loop.
		 */
#ifdef HAMMER2_BULKFREE_TRANS
		hammer2_trans_done(hmp->spmp, 0);
#endif
		if (error & ~HAMMER2_ERROR_CHECK)
			break;
		cbinfo.sbase = cbinfo.sstop;
		cbinfo.adj_free = 0;
	}
	kmem_free_swapbacked(&cbinfo.kp);
	kfree(cbinfo.dedup, M_HAMMER2);
	cbinfo.dedup = NULL;

	bfi->sstop = cbinfo.sbase;

	incr = bfi->sstop / (hmp->total_size / 10000);
	if (incr > 10000)
		incr = 10000;

	kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n",
		(int)incr / 100,
		(int)incr % 100);

	if (error & ~HAMMER2_ERROR_CHECK) {
		kprintf("    bulkfree was aborted\n");
	} else {
		if (error & HAMMER2_ERROR_CHECK) {
			kprintf("    WARNING: bulkfree "
				"encountered CRC errors\n");
		}
		kprintf("    transition->free   %ld\n", cbinfo.count_10_00);
		kprintf("    transition->staged %ld\n", cbinfo.count_11_10);
		kprintf("    ERR(00)->allocated %ld\n", cbinfo.count_00_11);
		kprintf("    ERR(01)->allocated %ld\n", cbinfo.count_01_11);
		kprintf("    staged->allocated  %ld\n", cbinfo.count_10_11);
		kprintf("    ~4MB segs cleaned  %ld\n", cbinfo.count_l0cleans);
		kprintf("    linear adjusts     %ld\n",
			cbinfo.count_linadjusts);
		kprintf("    dedup factor       %ld\n",
			cbinfo.count_dedup_factor);
		kprintf("    max saved chains   %ld\n", cbinfo.list_count_max);
	}

	return error;
}

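/*
 * Initialize the in-memory leaf bitmaps for the range being collected.
 * Areas outside [lokey, hikey), i.e. below the aligned allocator base,
 * within a zone's reserved leading segment, or beyond the end of media,
 * are pre-marked fully allocated so the sync pass will never free them.
 */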
static void
cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size)
{
	hammer2_bmap_data_t *bmap = cbinfo->bmap;
	hammer2_key_t key = cbinfo->sbase;
	hammer2_key_t lokey;
	hammer2_key_t hikey;

	lokey = (cbinfo->hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
		~HAMMER2_SEGMASK64;
	hikey = cbinfo->hmp->total_size & ~HAMMER2_SEGMASK64;

	bzero(bmap, size);
	while (size) {
		bzero(bmap, sizeof(*bmap));
		if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX))
			lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX);
		if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64)
			lokey = H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64;
		if (key < lokey || key >= hikey) {
			memset(bmap->bitmapq, -1,
			       sizeof(bmap->bitmapq));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
		} else {
			bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		}
		size -= sizeof(*bmap);
		key += HAMMER2_FREEMAP_LEVEL0_SIZE;
		++bmap;
	}
}

static int
h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref)
{
	hammer2_bmap_data_t *bmap;
	hammer2_off_t data_off;
	uint16_t class;
	size_t bytes;
	int radix;

	/*
	 * Check for a signal and allow yield to userland during the scan.
	 */
	if (hammer2_signal_check(&cbinfo->save_time))
		return HAMMER2_ERROR_ABORTED;

	/*
	 * Deal with kernel thread cpu or I/O hogging by limiting the
	 * number of chains scanned per second to hammer2_bulkfree_tps.
	 * Ignore leaf records (DIRENT and DATA); no per-record I/O is
	 * involved for those since we don't load their data.
	 */
	if (bref->type != HAMMER2_BREF_TYPE_DATA &&
	    bref->type != HAMMER2_BREF_TYPE_DIRENT) {
		++cbinfo->bulkfree_calls;
		if (cbinfo->bulkfree_calls > hammer2_bulkfree_tps) {
			int dticks = ticks - cbinfo->bulkfree_ticks;
			if (dticks < 0)
				dticks = 0;
			if (dticks < hz) {
				tsleep(&cbinfo->bulkfree_ticks, 0,
				       "h2bw", hz - dticks);
			}
			cbinfo->bulkfree_calls = 0;
			cbinfo->bulkfree_ticks = ticks;
		}
	}

	/*
	 * Calculate the data offset and determine if it is within
	 * the current freemap range being gathered.
	 */
	data_off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	if (data_off < cbinfo->sbase || data_off >= cbinfo->sstop)
		return 0;
	if (data_off < cbinfo->hmp->voldata.allocator_beg)
		return 0;
	if (data_off >= cbinfo->hmp->total_size)
		return 0;

	/*
	 * Calculate the information needed to generate the in-memory
	 * freemap record.
	 *
	 * HAMMER2 does not allow allocations to cross the L1 (1GB)
	 * boundary (or the L0 (4MB) boundary, for that matter); it is
	 * a problem if one does.
	 */
	radix = (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
	KKASSERT(radix != 0);
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;

	if (data_off + bytes > cbinfo->sstop) {
		kprintf("hammer2_bulkfree_scan: illegal 1GB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = cbinfo->sstop - data_off;	/* XXX */
	}

	/*
	 * Convert to a storage offset relative to the beginning of the
	 * storage range we are collecting, then look up the level0 bmap
	 * entry.
	 */
	data_off -= cbinfo->sbase;
	bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);

	/*
	 * Convert data_off to a bmap-relative value (~4MB storage range).
	 * Adjust linear, class, and avail.
	 *
	 * HAMMER2 does not allow allocations to cross the L0 (4MB) boundary.
	 */
	data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
	if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
		kprintf("hammer2_bulkfree_scan: illegal 4MB boundary "
			"%016jx %016jx/%d\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits);
		bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
	}

	if (bmap->class == 0) {
		bmap->class = class;
		bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
	}

	/*
	 * NOTE: bmap->class does not have to match class.  Classification
	 *	 is relaxed when free space is low, so some mixing can occur.
	 */
#if 0
	/*
	 * XXX removed
	 */
	if (bmap->class != class) {
		kprintf("hammer2_bulkfree_scan: illegal mixed class "
			"%016jx %016jx/%d (%04x vs %04x)\n",
			(intmax_t)bref->data_off,
			(intmax_t)bref->key,
			bref->keybits,
			class, bmap->class);
	}
#endif

	/*
	 * Just record the highest byte-granular offset for now.  Do not
	 * match against allocations which are in multiples of whole blocks.
	 *
	 * Make sure that any in-block linear offset at least covers the
	 * data range.  This can cause bmap->linear to become block-aligned.
	 */
	if (bytes & HAMMER2_FREEMAP_BLOCK_MASK) {
		if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
			bmap->linear = (int32_t)data_off + (int32_t)bytes;
	} else if (bmap->linear >= (int32_t)data_off &&
		   bmap->linear < (int32_t)data_off + (int32_t)bytes) {
		bmap->linear = (int32_t)data_off + (int32_t)bytes;
	}
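	/*
	 * Example of the linear adjustment above: a 4KB fragmentary
	 * allocation at bmap-relative offset 16384 has sub-block bits
	 * set in (bytes), so bmap->linear is bumped to at least 20480.
	 * A whole-block allocation only bumps linear when linear
	 * currently points inside that same block.
	 */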

	/*
	 * Adjust the hammer2_bitmap_t bitmap[HAMMER2_BMAP_ELEMENTS].
	 * 64-bit entries, 2 bits per entry, to code 11.
	 *
	 * NOTE: data_off is masked to 524288 (the range covered by one
	 *	 bitmap element), shifted right by 14 (the radix for 16384),
	 *	 and the shift amount is multiplied by 2 because each block
	 *	 is coded by a set of 2 bits.
	 *
	 * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE,
	 *	 and data_off may not be FREEMAP_BLOCK_SIZE aligned.
	 */
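	/*
	 * Worked example of the index math below, assuming the combined
	 * radix is 19 (the 524288-byte per-element range from the NOTE
	 * above): for a bmap-relative data_off of 0x90000 (576KB),
	 * bindex = 0x90000 >> 19 = 1 (each 64-bit element covers 512KB),
	 * the in-element block number is (0x90000 & 0x7ffff) >> 14 = 4,
	 * and bmask = 3 << (4 << 1), selecting the 2-bit field for that
	 * 16KB block.
	 */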
	while (bytes > 0) {
		hammer2_bitmap_t bmask;
		int bindex;

		bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
					   HAMMER2_BMAP_INDEX_RADIX);
		bmask = (hammer2_bitmap_t)3 <<
			((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
			 HAMMER2_FREEMAP_BLOCK_RADIX) << 1);

		/*
		 * NOTE! The (avail) calculation is bitmap-granular.  Multiple
		 *	 sub-granular records can wind up at the same bitmap
		 *	 position.
		 */
		if ((bmap->bitmapq[bindex] & bmask) == 0) {
			if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) {
				bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE;
			} else {
				bmap->avail -= bytes;
			}
			bmap->bitmapq[bindex] |= bmask;
		}
		data_off += HAMMER2_FREEMAP_BLOCK_SIZE;
		if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE)
			bytes = 0;
		else
			bytes -= HAMMER2_FREEMAP_BLOCK_SIZE;
	}
	return 0;
}

/*
 * Synchronize the in-memory bitmap with the live freemap.  This is not a
 * direct copy.  Instead the bitmaps must be compared:
 *
 *	In-memory	Live-freemap
 *	   00		  11 -> 10	(do nothing if live modified)
 *			  10 -> 00	(do nothing if live modified)
 *	   11		  10 -> 11	handles race against live
 *			  ** -> 11	nominally warn of corruption
 *
 * We must also fixup the hints in HAMMER2_BREF_TYPE_FREEMAP_LEAF.
 */
static int
h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo)
{
	hammer2_off_t data_off;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_bmap_data_t *bmap;
	hammer2_bmap_data_t *live;
	hammer2_chain_t *live_parent;
	hammer2_chain_t *live_chain;
	int bmapindex;
	int error;

	kprintf("hammer2_bulkfree - range ");

	if (cbinfo->sbase < cbinfo->hmp->voldata.allocator_beg)
		kprintf("%016jx-",
			(intmax_t)cbinfo->hmp->voldata.allocator_beg);
	else
		kprintf("%016jx-",
			(intmax_t)cbinfo->sbase);

	if (cbinfo->sstop > cbinfo->hmp->total_size)
		kprintf("%016jx\n",
			(intmax_t)cbinfo->hmp->total_size);
	else
		kprintf("%016jx\n",
			(intmax_t)cbinfo->sstop);

	data_off = cbinfo->sbase;
	bmap = cbinfo->bmap;

	live_parent = &cbinfo->hmp->fchain;
	hammer2_chain_ref(live_parent);
	hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS);
	live_chain = NULL;
	error = 0;

	/*
	 * Iterate over each hammer2_bmap_data_t line (128 bytes), each
	 * managing 4MB of storage.
	 */
	while (data_off < cbinfo->sstop) {
		/*
		 * The freemap is not used below allocator_beg or beyond
		 * total_size.
		 */
		if (data_off < cbinfo->hmp->voldata.allocator_beg)
			goto next;
		if (data_off >= cbinfo->hmp->total_size)
			goto next;

		/*
		 * Locate the freemap leaf on the live filesystem
		 */
		key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK);

		if (live_chain == NULL || live_chain->bref.key != key) {
			if (live_chain) {
				hammer2_chain_unlock(live_chain);
				hammer2_chain_drop(live_chain);
			}
			live_chain = hammer2_chain_lookup(
					    &live_parent,
					    &key_dummy,
					    key,
					    key + HAMMER2_FREEMAP_LEVEL1_MASK,
					    &error,
					    HAMMER2_LOOKUP_ALWAYS);
			if (error) {
				kprintf("hammer2_bulkfree: freemap lookup "
					"error near %016jx, error %s\n",
					(intmax_t)data_off,
					hammer2_error_str(error));
				break;
			}
		}
		if (live_chain == NULL) {
			/*
			 * XXX if we implement a full recovery mode we need
			 * to create/recreate missing freemap chains if our
			 * bmap has any allocated blocks.
			 */
			if (bmap->class &&
			    bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) {
				kprintf("hammer2_bulkfree: cannot locate "
					"live leaf for allocated data "
					"near %016jx\n",
					(intmax_t)data_off);
			}
			goto next;
		}
		if (live_chain->error) {
			kprintf("hammer2_bulkfree: unable to access freemap "
				"near %016jx, error %s\n",
				(intmax_t)data_off,
				hammer2_error_str(live_chain->error));
			hammer2_chain_unlock(live_chain);
			hammer2_chain_drop(live_chain);
			live_chain = NULL;
			goto next;
		}

		bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >>
			    HAMMER2_FREEMAP_LEVEL0_RADIX;
		live = &live_chain->data->bmdata[bmapindex];

		/*
		 * Shortcut if the bitmaps match and the live linear
		 * indicator is sane.  We can't do a perfect check of
		 * live->linear because the only real requirement is
		 * that, when it is not block-aligned, it not cover the
		 * space within its current block which overlaps one of
		 * the data ranges we scan.  We don't retain enough
		 * fine-grained data in our scan to set it exactly.
		 *
		 * TODO - we could shortcut this by testing that both
		 * live->class and bmap->class are 0, and both avails are
		 * set to HAMMER2_FREEMAP_LEVEL0_SIZE (4MB).
		 */
		if (bcmp(live->bitmapq, bmap->bitmapq,
			 sizeof(bmap->bitmapq)) == 0 &&
		    live->linear >= bmap->linear &&
		    (hammer2_aux_flags & 1) == 0 &&
		    bigmask_good(bmap, live_chain->bref.check.freemap.bigmask))
		{
			goto next;
		}
		if (hammer2_debug & 1) {
			kprintf("live %016jx %04d.%04x (avail=%d) "
				"bigmask %08x->%08x\n",
				(intmax_t)data_off, bmapindex, live->class,
				live->avail,
				live_chain->bref.check.freemap.bigmask,
				live_chain->bref.check.freemap.bigmask |
				bigmask_get(bmap));
		}

		if (hammer2_chain_modify(live_chain, cbinfo->mtid, 0, 0)) {
			kprintf("hammer2_bulkfree: unable to modify freemap "
				"at %016jx for data-block %016jx, error %s\n",
				(intmax_t)live_chain->bref.data_off,
				(intmax_t)data_off,
				hammer2_error_str(live_chain->error));
			hammer2_chain_unlock(live_chain);
			hammer2_chain_drop(live_chain);
			live_chain = NULL;
			goto next;
		}
		live_chain->bref.check.freemap.bigmask = -1;
		cbinfo->hmp->freemap_relaxed = 0;	/* reset heuristic */
		live = &live_chain->data->bmdata[bmapindex];

		h2_bulkfree_sync_adjust(cbinfo, data_off, live, bmap,
					live_chain->bref.key +
					bmapindex *
					HAMMER2_FREEMAP_LEVEL0_SIZE);
next:
		data_off += HAMMER2_FREEMAP_LEVEL0_SIZE;
		++bmap;
	}
	if (live_chain) {
		hammer2_chain_unlock(live_chain);
		hammer2_chain_drop(live_chain);
	}
	if (live_parent) {
		hammer2_chain_unlock(live_parent);
		hammer2_chain_drop(live_parent);
	}
	return error;
}

/*
 * Merge the bulkfree bitmap against the existing bitmap.
 */
static
void
h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
			hammer2_off_t data_off, hammer2_bmap_data_t *live,
			hammer2_bmap_data_t *bmap, hammer2_key_t alloc_base)
{
	int bindex;
	int scount;
	hammer2_off_t tmp_off;
	hammer2_bitmap_t lmask;
	hammer2_bitmap_t mmask;

	tmp_off = data_off;

	for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
		lmask = live->bitmapq[bindex];	/* live */
		mmask = bmap->bitmapq[bindex];	/* snapshotted bulkfree */
		if (lmask == mmask) {
			tmp_off += HAMMER2_BMAP_INDEX_SIZE;
			continue;
		}

		for (scount = 0;
		     scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
		     scount += 2) {
			if ((mmask & 3) == 0) {
				/*
				 * in-memory 00		live 11 -> 10
				 *			live 10 -> 00
				 *
				 * Storage might be marked allocated or
				 * staged and must be remarked staged or
				 * free.
				 */
				switch (lmask & 3) {
				case 0:	/* 00 */
					break;
				case 1:	/* 01 */
					kprintf("hammer2_bulkfree: cannot "
						"transition m=00/l=01\n");
					break;
				case 2:	/* 10 -> 00 */
					live->bitmapq[bindex] &=
					    ~((hammer2_bitmap_t)2 << scount);
					live->avail +=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					if (live->avail >
					    HAMMER2_FREEMAP_LEVEL0_SIZE) {
						live->avail =
						    HAMMER2_FREEMAP_LEVEL0_SIZE;
					}
					cbinfo->adj_free +=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					++cbinfo->count_10_00;
					hammer2_io_dedup_assert(
						cbinfo->hmp,
						tmp_off |
						HAMMER2_FREEMAP_BLOCK_RADIX,
						HAMMER2_FREEMAP_BLOCK_SIZE);
					break;
				case 3:	/* 11 -> 10 */
					live->bitmapq[bindex] &=
					    ~((hammer2_bitmap_t)1 << scount);
					++cbinfo->count_11_10;
					hammer2_io_dedup_delete(
						cbinfo->hmp,
						HAMMER2_BREF_TYPE_DATA,
						tmp_off |
						HAMMER2_FREEMAP_BLOCK_RADIX,
						HAMMER2_FREEMAP_BLOCK_SIZE);
					break;
				}
			} else if ((mmask & 3) == 3) {
				/*
				 * in-memory 11		live 10 -> 11
				 *			live ** -> 11
				 *
				 * Storage might be incorrectly marked free
				 * or staged and must be remarked fully
				 * allocated.
				 */
				switch (lmask & 3) {
				case 0:	/* 00 */
					/*
					 * This case is not supposed to
					 * happen.  If it does, it means
					 * that an allocated block was
					 * thought by the filesystem to be
					 * free.
					 */
					kprintf("hammer2_bulkfree: "
						"00->11 critical freemap "
						"transition for datablock "
						"%016jx\n",
						(intmax_t)tmp_off);
					++cbinfo->count_00_11;
					cbinfo->adj_free -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					live->avail -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					if ((int32_t)live->avail < 0)
						live->avail = 0;
					break;
				case 1:	/* 01 */
					++cbinfo->count_01_11;
					break;
				case 2:	/* 10 -> 11 */
					++cbinfo->count_10_11;
					break;
				case 3:	/* 11 */
					break;
				}
				live->bitmapq[bindex] |=
					((hammer2_bitmap_t)3 << scount);
			}
			mmask >>= 2;
			lmask >>= 2;
			tmp_off += HAMMER2_FREEMAP_BLOCK_SIZE;
		}
	}

	/*
	 * Determine if the live bitmap is completely free and reset its
	 * fields if so.  Otherwise check to see if we can reduce the linear
	 * offset.
	 */
	for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
		if (live->bitmapq[bindex] != 0)
			break;
	}
	if (bindex < 0) {
		/*
		 * Completely empty, reset entire segment
		 */
#if 0
		kprintf("hammer2: cleanseg %016jx.%04x (%d)\n",
			alloc_base, live->class, live->avail);
#endif
		live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		live->class = 0;
		live->linear = 0;
		++cbinfo->count_l0cleans;
	} else if (bindex < 7) {
		/*
		 * Partially full, bitmapq[bindex] != 0.  Our bulkfree pass
		 * does not record enough information to set live->linear
		 * exactly.
		 *
		 * NOTE: Setting live->linear to a sub-block (16K) boundary
		 *	 forces the live code to iterate to the next fully
		 *	 free block.  It does NOT mean that all blocks above
		 *	 live->linear are available.
		 *
		 *	 Setting live->linear to a fragmentary (less than
		 *	 16K) boundary allows allocations to iterate within
		 *	 that sub-block.
		 */
		if (live->linear < bmap->linear &&
		    ((live->linear ^ bmap->linear) &
		     ~HAMMER2_FREEMAP_BLOCK_MASK) == 0) {
			/*
			 * If greater than but still within the same
			 * sub-block as live we can adjust linear upward.
			 */
			live->linear = bmap->linear;
			++cbinfo->count_linadjusts;
		} else {
			/*
			 * Otherwise adjust to the nearest higher or same
			 * sub-block boundary.  The live system may have
			 * bounced live->linear around so we cannot make any
			 * assumptions with regards to available fragmentary
			 * allocations.
			 */
			live->linear =
				(bmap->linear + HAMMER2_FREEMAP_BLOCK_MASK) &
				~HAMMER2_FREEMAP_BLOCK_MASK;
			++cbinfo->count_linadjusts;
		}
	} else {
		/*
		 * Completely full, effectively disable the linear iterator
		 */
		live->linear = HAMMER2_SEGSIZE;
	}

#if 0
	if (bmap->class) {
		kprintf("%016jx %04d.%04x (avail=%7d) "
			"%08x %08x %08x %08x %08x %08x %08x %08x\n",
			(intmax_t)data_off,
			(int)((data_off &
			       HAMMER2_FREEMAP_LEVEL1_MASK) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX),
			bmap->class,
			bmap->avail,
			bmap->bitmapq[0], bmap->bitmapq[1],
			bmap->bitmapq[2], bmap->bitmapq[3],
			bmap->bitmapq[4], bmap->bitmapq[5],
			bmap->bitmapq[6], bmap->bitmapq[7]);
	}
#endif
}

/*
 * BULKFREE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 */
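/*
 * The test below behaves like an 8-way set-associative cache keyed on
 * bref->data_off: a crc of data_off selects an 8-entry bucket (the
 * HAMMER2_DEDUP_HEUR_MASK & ~7 masking aligns the index to a bucket).
 * A hit returns the error saved by the prior traversal with
 * HAMMER2_ERROR_EOF or'd in, telling the caller to skip the re-scan;
 * a miss replaces the bucket entry with the lowest ticks value.
 */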
static
int
h2_bulkfree_test(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref,
		 int pri, int saved_error)
{
	hammer2_dedup_t *dedup;
	int best;
	int n;
	int i;

	n = hammer2_icrc32(&bref->data_off, sizeof(bref->data_off));
	dedup = cbinfo->dedup + (n & (HAMMER2_DEDUP_HEUR_MASK & ~7));

	for (i = best = 0; i < 8; ++i) {
		if (dedup[i].data_off == bref->data_off) {
			if (dedup[i].ticks < pri)
				dedup[i].ticks = pri;
			if (pri == 1)
				cbinfo->count_dedup_factor += dedup[i].ticks;
			return (dedup[i].saved_error | HAMMER2_ERROR_EOF);
		}
		if (dedup[i].ticks < dedup[best].ticks)
			best = i;
	}
	dedup[best].data_off = bref->data_off;
	dedup[best].ticks = pri;
	dedup[best].saved_error = saved_error;

	return 0;
}

/*
 * Calculate what the bigmask should be.  The bigmask is permissive, so
 * the bits returned must be set in the live bigmask at a minimum; the
 * live bigmask may legitimately have additional bits set.
 */
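/*
 * For example, if at least one 16KB block (one 2-bit field) is free
 * anywhere in the bmap, bits 0-14 of the bigmask are set, covering every
 * allocation radix up to 16KB.  Each doubling of an aligned free run
 * within a 64-bit element enables roughly one more radix bit, topping
 * out at radix 19 (512KB, a fully-free element).
 */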
static uint32_t
bigmask_get(hammer2_bmap_data_t *bmap)
{
	hammer2_bitmap_t mask;	/* 64-bit mask to check */
	hammer2_bitmap_t scan;
	uint32_t bigmask;
	uint32_t radix_mask;
	int iter;
	int i;
	int j;

	bigmask = 0;
	for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
		mask = bmap->bitmapq[i];

		radix_mask = 1U << HAMMER2_FREEMAP_BLOCK_RADIX;
		radix_mask |= radix_mask - 1;
		iter = 2;	/* each bitmap entry is 2 bits. 2, 4, 8... */
		while (iter <= HAMMER2_BMAP_BITS_PER_ELEMENT) {
			if (iter == HAMMER2_BMAP_BITS_PER_ELEMENT)
				scan = -1;
			else
				scan = (1LU << iter) - 1;
			j = 0;
			while (j < HAMMER2_BMAP_BITS_PER_ELEMENT) {
				/*
				 * Check if all bits are 0 (free block).
				 * If so, set the bit in bigmask for the
				 * allocation radix under test.
				 */
				if ((scan & mask) == 0) {
					bigmask |= radix_mask;
				}
				scan <<= iter;
				j += iter;
			}
			iter <<= 1;
			radix_mask = (radix_mask << 1) | 1;
		}
	}
	return bigmask;
}

static int
bigmask_good(hammer2_bmap_data_t *bmap, uint32_t live_bigmask)
{
	uint32_t bigmask;

	bigmask = bigmask_get(bmap);
	return ((live_bigmask & bigmask) == bigmask);
}