/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
*/

#include "hammer2.h"

#define FREEMAP_DEBUG	0

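/*
 * Freemap allocation iterator state (see hammer2_freemap_alloc() and
 * hammer2_freemap_iterate()).  bpref is the heuristic starting offset,
 * bnext is the current iteration point, loops counts end-of-media wraps,
 * and relaxed (when non-zero) allows allocations from any freemap class.
 */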
struct hammer2_fiterate {
	hammer2_off_t	bpref;
	hammer2_off_t	bnext;
	int		loops;
	int		relaxed;
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			hammer2_blockref_t *bref, int radix,
			hammer2_fiterate_t *iter, hammer2_tid_t mtid);
static void hammer2_freemap_init(hammer2_dev_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_dev_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int sub_key, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_chain_t **parentp,
			hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between eight possibilities.  Theoretically this means we have seven
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
 */
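/*
 * Illustrative example (assumes the usual reserved-area layout): if the
 * previous copy of this freemap block was written to rotation slot 2 of
 * the zone's reserved area, the new copy goes to slot 3; after slot 7 the
 * rotation wraps back to slot 0.  A brand-new block (no data_off yet)
 * always starts at slot 0.
 */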
static
int
hammer2_freemap_reserve(hammer2_chain_t *chain, int radix)
{
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_off_t off;
	int index;
	int index_inc;
	size_t bytes;

	/*
	 * Physical allocation size.
	 */
	bytes = (size_t)1 << radix;

	/*
	 * Calculate block selection index 0..7 of current block.  If this
	 * is the first allocation of the block (versus a modification of an
	 * existing block), we use index 0, otherwise we use the next rotating
	 * index.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		index = 0;
	} else {
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      HAMMER2_SEGMASK;
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_00 &&
			 off < HAMMER2_ZONE_FREEMAP_END);
		index = (int)(off - HAMMER2_ZONE_FREEMAP_00) /
			HAMMER2_ZONE_FREEMAP_INC;
		KKASSERT(index >= 0 && index < HAMMER2_NFREEMAPS);
		if (++index == HAMMER2_NFREEMAPS)
			index = 0;
	}

	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	index_inc = index * HAMMER2_ZONE_FREEMAP_INC;

	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL6_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL5_RADIX:	/* 4EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL5_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL5) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 16PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL4) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 64TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL3) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 256GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL2) * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 1GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off = H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		      (index_inc + HAMMER2_ZONE_FREEMAP_00 +
		       HAMMER2_ZONEFM_LEVEL1) * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		off = (hammer2_off_t)-1;
		break;
	}
	bref->data_off = off | radix;
#if FREEMAP_DEBUG
	kprintf("FREEMAP BLOCK TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		bref->type, bref->key, bref->keybits, bref->data_off);
#endif
	return (0);
}

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * bpref is only used as a heuristic to determine locality of reference.
 *
 * This function is a NOP if bytes is 0.
 */
int
hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes)
{
	hammer2_dev_t *hmp = chain->hmp;
	hammer2_blockref_t *bref = &chain->bref;
	hammer2_chain_t *parent;
	hammer2_tid_t mtid;
	int radix;
	int error;
	unsigned int hindex;
	hammer2_fiterate_t iter;

	/*
	 * If allocating or downsizing to zero we just get rid of whatever
	 * data_off we had.
	 */
	if (bytes == 0) {
		chain->bref.data_off = 0;
		return 0;
	}

	KKASSERT(hmp->spmp);
	mtid = hammer2_trans_sub(hmp->spmp);

	/*
	 * Validate the allocation size.  It must be a power of 2.
	 *
	 * For now require that the caller be aware of the minimum
	 * allocation (1K).
	 */
	radix = hammer2_getradix(bytes);
	KKASSERT((size_t)1 << radix == bytes);

	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		/*
		 * Freemap blocks themselves are assigned from the reserve
		 * area, not allocated from the freemap.
		 */
		error = hammer2_freemap_reserve(chain, radix);

		return error;
	}

	KKASSERT(bytes >= HAMMER2_ALLOC_MIN && bytes <= HAMMER2_ALLOC_MAX);

	/*
	 * Heuristic tracking index.  We would like one for each distinct
	 * bref type if possible.  heur_freemap[] has room for two classes
	 * for each type.  At a minimum we have to break-up our heuristic
	 * by device block sizes.
	 */
	hindex = HAMMER2_PBUFRADIX - HAMMER2_LBUFRADIX;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_SIZE);
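	/*
	 * Illustrative: the first term selects a slot based on the device
	 * vs. logical buffer size ratio, the second spreads slots out per
	 * bref type, and the final mask wraps the result so hindex always
	 * lands inside heur_freemap[] even for unexpected bref types.
	 */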

	iter.bpref = hmp->heur_freemap[hindex];
	iter.relaxed = hmp->freemap_relaxed;

	/*
	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
	 * reserved area, the try code will iterate past it.
	 */
	if (iter.bpref > hmp->total_size)
		iter.bpref = hmp->total_size - 1;

	/*
	 * Iterate the freemap looking for free space before and after.
	 */
	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	error = HAMMER2_ERROR_EAGAIN;
	iter.bnext = iter.bpref;
	iter.loops = 0;

	while (error == HAMMER2_ERROR_EAGAIN) {
		error = hammer2_freemap_try_alloc(&parent, bref, radix,
						  &iter, mtid);
	}
	hmp->freemap_relaxed |= iter.relaxed;	/* heuristical, SMP race ok */
	hmp->heur_freemap[hindex] = iter.bnext;
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	return (error);
}

static int
hammer2_freemap_try_alloc(hammer2_chain_t **parentp,
			  hammer2_blockref_t *bref, int radix,
			  hammer2_fiterate_t *iter, hammer2_tid_t mtid)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_key_t key_dummy;
	hammer2_chain_t *chain;
	hammer2_off_t key;
	size_t bytes;
	uint16_t class;
	int error;

	/*
	 * Calculate the number of bytes being allocated.
	 */
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;
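	/*
	 * Illustrative: for a DATA bref the class encodes
	 * (HAMMER2_BREF_TYPE_DATA << 8) in the high byte and the device
	 * buffer radix (e.g. 16 for 64KB device buffers) in the low byte,
	 * so a bmap is normally only shared by allocations of the same
	 * bref type.
	 */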

	/*
	 * Lookup the level1 freemap chain, creating and initializing one
	 * if necessary.  Intermediate levels will be created automatically
	 * when necessary by hammer2_chain_create().
	 */
	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = HAMMER2_FREEMAP_LEVEL0_SIZE;
	l1size = HAMMER2_FREEMAP_LEVEL1_SIZE;
	l1mask = l1size - 1;

	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
				     &error,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	if (chain == NULL) {
		/*
		 * Create the missing leaf, be sure to initialize
		 * the auxiliary freemap tracking information in
		 * the bref.check.freemap structure.
		 */
#if 0
		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
			key, iter->bpref);
#endif
		error = hammer2_chain_create(parentp, &chain, NULL, hmp->spmp,
				     HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);
		KKASSERT(error == 0);
		if (error == 0) {
			hammer2_chain_modify(chain, mtid, 0, 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(hmp, key, chain);
		}
	} else if (chain->error) {
		/*
		 * Error during lookup.
		 */
		kprintf("hammer2_freemap_try_alloc: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		error = HAMMER2_ERROR_EIO;
	} else if ((chain->bref.check.freemap.bigmask &
		   ((size_t)1 << radix)) == 0) {
		/*
		 * Already flagged as not having enough space
		 */
		error = HAMMER2_ERROR_ENOSPC;
	} else {
		/*
		 * Modify existing chain to setup for adjustment.
		 */
		hammer2_chain_modify(chain, mtid, 0, 0);
	}

	/*
	 * Scan 4MB entries.
	 */
	if (error == 0) {
		hammer2_bmap_data_t *bmap;
		hammer2_key_t base_key;
		int count;
		int start;
		int n;

		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		start = (int)((iter->bnext - key) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX);
		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
		hammer2_chain_modify(chain, mtid, 0, 0);

		error = HAMMER2_ERROR_ENOSPC;
		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
			int availchk;

			if (start + count >= HAMMER2_FREEMAP_COUNT &&
			    start - count < 0) {
				break;
			}

			/*
			 * Calculate bmap pointer from the starting index
			 * forwards.
			 *
			 * NOTE: bmap pointer is invalid if n >= FREEMAP_COUNT.
			 */
			n = start + count;
			bmap = &chain->data->bmdata[n];

			if (n >= HAMMER2_FREEMAP_COUNT) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
			          (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			/*
			 * Try to allocate from a matching freemap class
			 * superblock.  If we are in relaxed mode we allocate
			 * from any freemap class superblock.
			 */
			if (availchk &&
			    (bmap->class == 0 || bmap->class == class ||
			     iter->relaxed)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n,
							   (int)bref->key,
							   radix,
							   &base_key);
				if (error != HAMMER2_ERROR_ENOSPC) {
					key = base_key;
					break;
				}
			}

			/*
			 * Calculate bmap pointer from the starting index
			 * backwards (locality).
			 *
			 * Must recalculate after potentially having called
			 * hammer2_bmap_alloc() above in case chain was
			 * reallocated.
			 *
			 * NOTE: bmap pointer is invalid if n < 0.
			 */
			n = start - count;
			bmap = &chain->data->bmdata[n];
			if (n < 0) {
				availchk = 0;
			} else if (bmap->avail) {
				availchk = 1;
			} else if (radix < HAMMER2_FREEMAP_BLOCK_RADIX &&
			          (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK)) {
				availchk = 1;
			} else {
				availchk = 0;
			}

			/*
			 * Try to allocate from a matching freemap class
			 * superblock.  If we are in relaxed mode we allocate
			 * from any freemap class superblock.
			 */
			if (availchk &&
			    (bmap->class == 0 || bmap->class == class ||
			    iter->relaxed)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(hmp, bmap,
							   class, n,
							   (int)bref->key,
							   radix,
							   &base_key);
				if (error != HAMMER2_ERROR_ENOSPC) {
					key = base_key;
					break;
				}
			}
		}

		/*
		 * We only know for sure that we can clear the bitmap bit
		 * if we scanned the entire array (start == 0) in relaxed
		 * mode.
		 */
		if (error == HAMMER2_ERROR_ENOSPC &&
		    start == 0 &&
		    iter->relaxed)
		{
			chain->bref.check.freemap.bigmask &=
				(uint32_t)~((size_t)1 << radix);
		}
		/* XXX also scan down from original count */
	}

	if (error == 0) {
		/*
		 * Assert validity.  Must be beyond the static allocator used
		 * by newfs_hammer2 (and thus also beyond the aux area),
		 * not go past the volume size, and must not be in the
		 * reserved segment area for a zone.
		 */
		KKASSERT(key >= hmp->voldata.allocator_beg &&
			 key + bytes <= hmp->total_size);
		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
		bref->data_off = key | radix;

		/*
		 * Record dedupability.  The dedup bits are cleared
		 * when bulkfree transitions the freemap from 11->10,
		 * and asserted to be clear on the 10->00 transition.
		 *
		 * We must record the bitmask with the chain locked
		 * at the time we set the allocation bits to avoid
		 * racing a bulkfree.
		 */
		if (bref->type == HAMMER2_BREF_TYPE_DATA)
			hammer2_io_dedup_set(hmp, bref);
#if 0
		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
			chain,
			bref->key, bref->data_off, chain->bref.data_off);
#endif
	} else if (error == HAMMER2_ERROR_ENOSPC) {
		/*
		 * Return EAGAIN with next iteration in iter->bnext, or
		 * return ENOSPC if the allocation map has been exhausted.
		 */
		error = hammer2_freemap_iterate(parentp, &chain, iter);
	}

	/*
	 * Cleanup
	 */
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	return (error);
}

/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the
 * bitmap that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 *
 * sub_key is the lower 32 bits of the chain->bref.key for the chain whose
 * bref is being allocated.  If the radix represents an allocation >= 16KB
 * (aka HAMMER2_FREEMAP_BLOCK_RADIX) we try to use this key to select the
 * blocks directly out of the bmap.
 */
static
int
hammer2_bmap_alloc(hammer2_dev_t *hmp, hammer2_bmap_data_t *bmap,
		   uint16_t class, int n, int sub_key,
		   int radix, hammer2_key_t *basep)
{
	size_t size;
	size_t bgsize;
	int bmradix;
	hammer2_bitmap_t bmmask;
	int offset;
	int i;
	int j;

	/*
	 * Take into account 2-bits per block when calculating bmradix.
	 */
	size = (size_t)1 << radix;

	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
		bmradix = 2;
		/* (16K) 2 bits per allocation block */
	} else {
		bmradix = (hammer2_bitmap_t)2 <<
			  (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
		/* (32K-64K) 4, 8 bits per allocation block */
	}

	/*
	 * Use the linear iterator to pack small allocations, otherwise
	 * fall back to finding a free 16KB chunk.  The linear iterator
	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
	 * If it is, the bitmap must be scanned.  It can become invalid
	 * once we pack to the boundary.  We adjust it after a bitmap
	 * allocation only for sub-16KB allocations (so the perfectly good
	 * previous value can still be used for fragments when 16KB+
	 * allocations are made in between fragmentary allocations).
	 *
	 * Beware of hardware artifacts when bmradix == 64 (intermediate
	 * result can wind up being '1' instead of '0' if hardware masks
	 * bit-count & 63).
	 *
	 * NOTE: j needs to be even in the j= calculation.  As an artifact
	 *	 of the /2 division, our bitmask has to clear bit 0.
	 *
	 * NOTE: TODO this can leave little unallocatable fragments lying
	 *	 around.
	 */
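	/*
	 * Illustrative example (not from a real run, assuming 4MB bmap
	 * segments, 8 bitmap elements and 16KB freemap blocks): a 4KB
	 * allocation with bmap->linear == 24KB takes the linear path below:
	 * offset = 24KB, i = 24KB / 512KB = 0, j = (24KB / 8KB) & 62 = 2,
	 * bmradix = 2, bmmask = 3 << 2, and linear advances to 28KB.
	 */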
	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
	    HAMMER2_FREEMAP_BLOCK_SIZE &&
	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
	    bmap->linear < HAMMER2_SEGSIZE) {
		/*
		 * Use linear iterator if it is not block-aligned to avoid
		 * wasting space.
		 *
		 * Calculate the bitmapq[] index (i) and calculate the
		 * shift count within the 64-bit bitmapq[] entry.
		 *
		 * The freemap block size is 16KB, but each bitmap
		 * entry is two bits so use a little trick to get
		 * a (j) shift of 0, 2, 4, ... 62 in 16KB chunks.
		 */
		KKASSERT(bmap->linear >= 0 &&
			 bmap->linear + size <= HAMMER2_SEGSIZE &&
			 (bmap->linear & (HAMMER2_ALLOC_MIN - 1)) == 0);
		offset = bmap->linear;
		i = offset / (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS);
		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 62;
		bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			 HAMMER2_BMAP_ALLONES :
			 ((hammer2_bitmap_t)1 << bmradix) - 1;
		bmmask <<= j;
		bmap->linear = offset + size;
	} else {
		/*
		 * Try to index a starting point based on sub_key.  This
		 * attempts to restore sequential block ordering on-disk
		 * whenever possible, even if data is committed out of
		 * order.
		 *
		 * i - Index bitmapq[], full data range represented is
		 *     HAMMER2_BMAP_SIZE.
		 *
		 * j - Index within bitmapq[i], full data range represented is
		 *     HAMMER2_BMAP_INDEX_SIZE.
		 *
		 * WARNING!
		 */
		i = -1;
		j = -1;

		switch(class >> 8) {
		case HAMMER2_BREF_TYPE_DATA:
			if (radix >= HAMMER2_FREEMAP_BLOCK_RADIX) {
				i = (sub_key & HAMMER2_BMAP_MASK) /
				    (HAMMER2_BMAP_SIZE / HAMMER2_BMAP_ELEMENTS);
				j = (sub_key & HAMMER2_BMAP_INDEX_MASK) /
				    (HAMMER2_BMAP_INDEX_SIZE /
				     HAMMER2_BMAP_BLOCKS_PER_ELEMENT);
				j = j * 2;
			}
			break;
		case HAMMER2_BREF_TYPE_INODE:
			break;
		default:
			break;
		}
		if (i >= 0) {
			KKASSERT(i < HAMMER2_BMAP_ELEMENTS &&
				 j < 2 * HAMMER2_BMAP_BLOCKS_PER_ELEMENT);
			KKASSERT(j + bmradix <= HAMMER2_BMAP_BITS_PER_ELEMENT);
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			bmmask <<= j;

			if ((bmap->bitmapq[i] & bmmask) == 0)
				goto success;
		}

		/*
		 * General element scan.
		 *
		 * WARNING: (j) is iterating a bit index (by 2's)
		 */
		for (i = 0; i < HAMMER2_BMAP_ELEMENTS; ++i) {
			bmmask = (bmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
				 HAMMER2_BMAP_ALLONES :
				 ((hammer2_bitmap_t)1 << bmradix) - 1;
			for (j = 0;
			     j < HAMMER2_BMAP_BITS_PER_ELEMENT;
			     j += bmradix) {
				if ((bmap->bitmapq[i] & bmmask) == 0)
					goto success;
				bmmask <<= bmradix;
			}
		}
		/*fragments might remain*/
		/*KKASSERT(bmap->avail == 0);*/
		return (HAMMER2_ERROR_ENOSPC);
success:
		offset = i * (HAMMER2_SEGSIZE / HAMMER2_BMAP_ELEMENTS) +
			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
			bmap->linear = offset + size;
	}

	/* 8 x (64/2) -> 256 x 16K -> 4MB */
	KKASSERT(i >= 0 && i < HAMMER2_BMAP_ELEMENTS);

	/*
	 * Optimize the buffer cache to avoid unnecessary read-before-write
	 * operations.
	 *
	 * The device block size could be larger than the allocation size
	 * so the actual bitmap test is somewhat more involved.  We have
	 * to use a compatible buffer size for this operation.
	 */
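	/*
	 * Illustrative sketch (assuming 64KB device buffers and 16KB
	 * freemap blocks): pbmradix = 2 << (16 - 14) = 8, so pbmmask is an
	 * 8-bit window covering the four 2-bit pairs of one device buffer.
	 * The window is slid until it overlaps bmmask; if the entire device
	 * buffer is still unallocated a new-no-zero dio is created so the
	 * buffer is not read from media first.
	 */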
	if ((bmap->bitmapq[i] & bmmask) == 0 &&
	    HAMMER2_PBUFSIZE != size) {
		size_t psize = HAMMER2_PBUFSIZE;
		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
		int pbmradix = (hammer2_bitmap_t)2 <<
					(HAMMER2_PBUFRADIX -
			       HAMMER2_FREEMAP_BLOCK_RADIX);
		hammer2_bitmap_t pbmmask;
		int pradix = hammer2_getradix(psize);

		pbmmask = (pbmradix == HAMMER2_BMAP_BITS_PER_ELEMENT) ?
			HAMMER2_BMAP_ALLONES :
			((hammer2_bitmap_t)1 << pbmradix) - 1;
		while ((pbmmask & bmmask) == 0)
			pbmmask <<= pbmradix;

#if 0
		kprintf("%016jx mask %016jx %016jx %016jx (%zd/%zd)\n",
			*basep + offset, bmap->bitmapq[i],
			pbmmask, bmmask, size, psize);
#endif

		if ((bmap->bitmapq[i] & pbmmask) == 0) {
			hammer2_io_t *dio;

			hammer2_io_newnz(hmp, class >> 8,
					(*basep + (offset & ~pmask)) |
					pradix, psize, &dio);
			hammer2_io_putblk(&dio);
		}
	}

#if 0
	/*
	 * When initializing a new inode segment also attempt to initialize
	 * an adjacent segment.  Be careful not to index beyond the array
	 * bounds.
	 *
	 * We do this to try to localize inode accesses to improve
	 * directory scan rates.  XXX doesn't improve scan rates.
	 */
	if (size == HAMMER2_INODE_BYTES) {
		if (n & 1) {
			if (bmap[-1].radix == 0 && bmap[-1].avail)
				bmap[-1].radix = radix;
		} else {
			if (bmap[1].radix == 0 && bmap[1].avail)
				bmap[1].radix = radix;
		}
	}
#endif
	/*
	 * Calculate the bitmap-granular change in bgsize for the volume
	 * header.  We cannot use the fine-grained change here because
	 * the bulkfree code can't undo it.  If the bitmap element is already
	 * marked allocated it has already been accounted for.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		if (bmap->bitmapq[i] & bmmask)
			bgsize = 0;
		else
			bgsize = HAMMER2_FREEMAP_BLOCK_SIZE;
	} else {
		bgsize = size;
	}
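	/*
	 * Illustrative: a 4KB allocation landing in a previously empty
	 * 16KB bitmap block accounts the full 16KB here; follow-on sub-16KB
	 * allocations packed into the same block account nothing because
	 * the 2-bit pair is already marked allocated.
	 */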

	/*
	 * Adjust the bitmap, set the class (it might have been 0),
	 * and available bytes, update the allocation offset (*basep)
	 * from the L0 base to the actual offset.
	 *
	 * Do not override the class if doing a relaxed class allocation.
	 *
	 * avail must reflect the bitmap-granular availability.  The allocator
	 * tests will also check the linear iterator.
	 */
	bmap->bitmapq[i] |= bmmask;
	if (bmap->class == 0)
		bmap->class = class;
	bmap->avail -= bgsize;
	*basep += offset;

	/*
	 * Adjust the volume header's allocator_free parameter.  This
	 * parameter has to be fixed up by bulkfree which has no way to
	 * figure out sub-16K chunking, so it must be adjusted by the
	 * bitmap-granular size.
	 */
	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}

	return(0);
}

/*
 * Initialize a freemap for the storage area (in bytes) that begins at (key).
 */
static
void
hammer2_freemap_init(hammer2_dev_t *hmp, hammer2_key_t key,
		     hammer2_chain_t *chain)
{
	hammer2_off_t lokey;
	hammer2_off_t hikey;
	hammer2_bmap_data_t *bmap;
	int count;

	/*
	 * Calculate the portion of the 1GB map that should be initialized
	 * as free.  Portions below or after will be initialized as allocated.
	 * SEGMASK-align the areas so we don't have to worry about sub-scans
	 * or endianness when using memset.
	 *
	 * WARNING! It is possible for lokey to be larger than hikey if the
	 *	    entire 2GB segment is within the static allocation.
	 */
	/*
	 * (1) Ensure that all statically allocated space from newfs_hammer2
	 *     is marked allocated, and take it up to the level1 base for
	 *     this key.
	 */
	lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
		~HAMMER2_SEGMASK64;
	if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX))
		lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX);

	/*
	 * (2) Ensure that the reserved area is marked allocated (typically
	 *     the first 4MB of each 2GB area being represented).  Since
	 *     each LEAF represents 1GB of storage and the zone is 2GB, we
	 *     have to adjust lokey upward every other LEAF sequentially.
	 */
	if (lokey < H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64)
		lokey = H2FMZONEBASE(key) + HAMMER2_ZONE_SEG64;

	/*
	 * (3) Ensure that any trailing space at the end-of-volume is marked
	 *     allocated.
	 */
	hikey = key + HAMMER2_FREEMAP_LEVEL1_SIZE;
	if (hikey > hmp->total_size) {
		hikey = hmp->total_size & ~HAMMER2_SEGMASK64;
	}
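	/*
	 * Illustrative (hypothetical 1.5GB volume): the leaf at key 0 gets
	 * lokey raised to cover the newfs_hammer2 static allocation and the
	 * zone's 4MB reserved segment, while the leaf at key 1GB extends
	 * past the end of the volume and has hikey clamped down to the
	 * SEGMASK-aligned volume size.  Everything outside [lokey, hikey)
	 * is initialized as allocated below.
	 */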

	/*
	 * Heuristic highest possible value
	 */
	chain->bref.check.freemap.avail = HAMMER2_FREEMAP_LEVEL1_SIZE;
	bmap = &chain->data->bmdata[0];

	/*
	 * Initialize bitmap (bzero'd by caller)
	 */
	for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
		if (key < lokey || key >= hikey) {
			memset(bmap->bitmapq, -1,
			       sizeof(bmap->bitmapq));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
			chain->bref.check.freemap.avail -=
				HAMMER2_FREEMAP_LEVEL0_SIZE;
		} else {
			bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
		}
		key += HAMMER2_FREEMAP_LEVEL0_SIZE;
		++bmap;
	}
}

/*
 * The current Level 1 freemap has been exhausted.  Iterate to the next
 * one, returning ENOSPC if no freemaps remain.
 *
 * At least two loops are required.  If we are not in relaxed mode and
 * we run out of storage we enter relaxed mode and do a third loop.
 * Relaxed mode is recorded back in the hmp, so once entered we remain
 * relaxed until space starts being freed again and only two loops are done.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space. TODO
 */
static int
hammer2_freemap_iterate(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter)
{
	hammer2_dev_t *hmp = (*parentp)->hmp;

	iter->bnext &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
	iter->bnext += HAMMER2_FREEMAP_LEVEL1_SIZE;
	if (iter->bnext >= hmp->total_size) {
		iter->bnext = 0;
		if (++iter->loops >= 2) {
			if (iter->relaxed == 0)
				iter->relaxed = 1;
			else
				return (HAMMER2_ERROR_ENOSPC);
		}
	}
	return(HAMMER2_ERROR_EAGAIN);
}

/*
 * Adjust the bit-pattern for data in the freemap bitmap according to
 * (how).  This code is called from on-mount recovery to fix up (mark
 * as allocated) blocks whose freemap updates might not have been committed
 * in the last crash and is used by the bulk freemap scan to stage frees.
 *
 * WARNING! Cannot be called with an empty-data bref (radix == 0).
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
void
hammer2_freemap_adjust(hammer2_dev_t *hmp, hammer2_blockref_t *bref,
		       int how)
{
	hammer2_off_t data_off = bref->data_off;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_bmap_data_t *bmap;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_tid_t mtid;
	hammer2_bitmap_t *bitmap;
	const hammer2_bitmap_t bmmask00 = 0;
	//hammer2_bitmap_t bmmask01;
	//hammer2_bitmap_t bmmask10;
	hammer2_bitmap_t bmmask11;
	size_t bytes;
	uint16_t class;
	int radix;
	int start;
	int count;
	int modified = 0;
	int error;
	size_t bgsize = 0;

	KKASSERT(how == HAMMER2_FREEMAP_DORECOVER);

	KKASSERT(hmp->spmp);
	mtid = hammer2_trans_sub(hmp->spmp);

	radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix != 0);
	data_off &= ~HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix <= HAMMER2_RADIX_MAX);

	if (radix)
		bytes = (size_t)1 << radix;
	else
		bytes = 0;
	class = (bref->type << 8) | HAMMER2_PBUFRADIX;

	/*
	 * We can't adjust the freemap for data allocations made by
	 * newfs_hammer2.
	 */
	if (data_off < hmp->voldata.allocator_beg)
		return;

	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);

	/*
	 * Lookup the level1 freemap chain.  The chain must exist.
	 */
	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1size = HAMMER2_FREEMAP_LEVEL1_SIZE;
	l1mask = l1size - 1;

	parent = &hmp->fchain;
	hammer2_chain_ref(parent);
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
				     &error,
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND);

	/*
	 * Stop early if we are trying to free something but no leaf exists.
	 */
	if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
		kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
			(intmax_t)bref->data_off);
		goto done;
	}
	if (chain->error) {
		kprintf("hammer2_freemap_adjust: %016jx: error %s\n",
			(intmax_t)bref->data_off,
			hammer2_error_str(chain->error));
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		chain = NULL;
		goto done;
	}

	/*
	 * Create any missing leaf(s) if we are doing a recovery (marking
	 * the block(s) as being allocated instead of being freed).  Be sure
	 * to initialize the auxiliary freemap tracking info in the
	 * bref.check.freemap structure.
	 */
	if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
		error = hammer2_chain_create(&parent, &chain, NULL, hmp->spmp,
				     HAMMER2_METH_DEFAULT,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE,
				     mtid, 0, 0);

		if (hammer2_debug & 0x0040) {
			kprintf("fixup create chain %p %016jx:%d\n",
				chain, chain->bref.key, chain->bref.keybits);
		}

		if (error == 0) {
			error = hammer2_chain_modify(chain, mtid, 0, 0);
			KKASSERT(error == 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(hmp, key, chain);
		}
		/* XXX handle error */
	}

#if FREEMAP_DEBUG
	kprintf("FREEMAP ADJUST TYPE %d %016jx/%d DATA_OFF=%016jx\n",
		chain->bref.type, chain->bref.key,
		chain->bref.keybits, chain->bref.data_off);
#endif

	/*
	 * Calculate the bitmask (runs in 2-bit pairs).
	 */
	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
	//bmmask01 = (hammer2_bitmap_t)1 << start;
	//bmmask10 = (hammer2_bitmap_t)2 << start;
	bmmask11 = (hammer2_bitmap_t)3 << start;
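	/*
	 * Illustrative: each 16KB freemap block is tracked by a 2-bit pair,
	 * so bmmask11 selects both bits of the first pair covered by this
	 * bref and is shifted left by 2 for each additional 16KB block as
	 * the loop below walks the allocation.
	 */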

	/*
	 * Fixup the bitmap.  Partial blocks cannot be fully freed unless
	 * a bulk scan is able to roll them up.
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		count = 1;
#if 0
		if (how == HAMMER2_FREEMAP_DOREALFREE)
			how = HAMMER2_FREEMAP_DOMAYFREE;
#endif
	} else {
		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
	}
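	/*
	 * Illustrative (with 16KB freemap blocks): a 64KB bref (radix 16)
	 * spans 1 << (16 - 14) = 4 freemap blocks, so four 2-bit pairs are
	 * adjusted; anything 16KB or smaller adjusts a single pair.
	 */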

	/*
	 * [re]load the bmap and bitmap pointers.  Each bmap entry covers
	 * a 4MB swath.  The bmap itself (LEVEL1) covers 2GB.
	 *
	 * Be sure to reset the linear iterator to ensure that the adjustment
	 * is not ignored.
	 */
again:
	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
				    (HAMMER2_FREEMAP_COUNT - 1)];
	bitmap = &bmap->bitmapq[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];

	if (modified)
		bmap->linear = 0;

	while (count) {
		KKASSERT(bmmask11);
		if (how == HAMMER2_FREEMAP_DORECOVER) {
			/*
			 * Recovery request, mark as allocated.
			 */
			if ((*bitmap & bmmask11) != bmmask11) {
				if (modified == 0) {
					hammer2_chain_modify(chain, mtid, 0, 0);
					modified = 1;
					goto again;
				}
				if ((*bitmap & bmmask11) == bmmask00) {
					bmap->avail -=
						HAMMER2_FREEMAP_BLOCK_SIZE;
					bgsize += HAMMER2_FREEMAP_BLOCK_SIZE;
				}
				if (bmap->class == 0)
					bmap->class = class;
				*bitmap |= bmmask11;
				if (hammer2_debug & 0x0040) {
					kprintf("hammer2_freemap_adjust: "
						"fixup type=%02x "
						"block=%016jx/%zd\n",
						bref->type, data_off, bytes);
				}
			} else {
				/*
				kprintf("hammer2_freemap_adjust:  good "
					"type=%02x block=%016jx/%zd\n",
					bref->type, data_off, bytes);
				*/
			}
		}
#if 0
		/*
		 * XXX this stuff doesn't work, avail is miscalculated and
		 * code 10 means something else now.
		 */
		else if ((*bitmap & bmmask11) == bmmask11) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being fully allocated.
			 */
			if (!modified) {
				hammer2_chain_modify(chain, 0);
				modified = 1;
				goto again;
			}
			if (how == HAMMER2_FREEMAP_DOREALFREE)
				*bitmap &= ~bmmask11;
			else
				*bitmap = (*bitmap & ~bmmask11) | bmmask10;
		} else if ((*bitmap & bmmask11) == bmmask10) {
			/*
			 * Mayfree/Realfree request and bitmap is currently
			 * marked as being possibly freeable.
			 */
			if (how == HAMMER2_FREEMAP_DOREALFREE) {
				if (!modified) {
					hammer2_chain_modify(chain, 0);
					modified = 1;
					goto again;
				}
				*bitmap &= ~bmmask11;
			}
		} else {
			/*
			 * 01 - Not implemented, currently illegal state
			 * 00 - Not allocated at all, illegal free.
			 */
			panic("hammer2_freemap_adjust: "
			      "Illegal state %08x(%08x)",
			      *bitmap, *bitmap & bmmask11);
		}
#endif
		--count;
		//bmmask01 <<= 2;
		//bmmask10 <<= 2;
		bmmask11 <<= 2;
	}
#if 0
#if HAMMER2_BMAP_ELEMENTS != 8
#error "hammer2_freemap.c: HAMMER2_BMAP_ELEMENTS expected to be 8"
#endif
	if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
		bmap->avail += 1 << radix;
		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
		if (bmap->avail == HAMMER2_SEGSIZE &&
		    bmap->bitmapq[0] == 0 &&
		    bmap->bitmapq[1] == 0 &&
		    bmap->bitmapq[2] == 0 &&
		    bmap->bitmapq[3] == 0 &&
		    bmap->bitmapq[4] == 0 &&
		    bmap->bitmapq[5] == 0 &&
		    bmap->bitmapq[6] == 0 &&
		    bmap->bitmapq[7] == 0) {
			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
			kprintf("Freeseg %016jx\n", (intmax_t)key);
			bmap->class = 0;
		}
	}
#endif

	/*
	 * chain->bref.check.freemap.bigmask (XXX)
	 *
	 * Setting bigmask is a hint to the allocation code that there might
	 * be something allocatable.  We also set this in recovery... it
	 * doesn't hurt and we might want to use the hint for other validation
	 * operations later on.
	 *
	 * We could calculate the largest possible allocation and set the
	 * radixes that could fit, but it's easier just to set bigmask to -1.
	 */
	if (modified) {
		chain->bref.check.freemap.bigmask = -1;
		hmp->freemap_relaxed = 0;	/* reset heuristic */
	}

	hammer2_chain_unlock(chain);
	hammer2_chain_drop(chain);
done:
	hammer2_chain_unlock(parent);
	hammer2_chain_drop(parent);

	if (bgsize) {
		hammer2_voldata_lock(hmp);
		hammer2_voldata_modify(hmp);
		hmp->voldata.allocator_free -= bgsize;
		hammer2_voldata_unlock(hmp);
	}
}

/*
 * Validate the freemap, in three stages.
 *
 * stage-1	ALLOCATED     -> POSSIBLY FREE
 *		POSSIBLY FREE -> POSSIBLY FREE (type corrected)
 *
 *	This transitions bitmap entries from ALLOCATED to POSSIBLY FREE.
 *	The POSSIBLY FREE state does not mean that a block is actually free
 *	and may be transitioned back to ALLOCATED in stage-2.
 *
 *	This is typically done during normal filesystem operations when
 *	something is deleted or a block is replaced.
 *
 *	This is done by bulkfree in-bulk after a memory-bounded meta-data
 *	scan to try to determine what might be freeable.
 *
 *	This can be done unconditionally through a freemap scan when the
 *	intention is to brute-force recover the proper state of the freemap.
 *
 * stage-2	POSSIBLY FREE -> ALLOCATED	(scan metadata topology)
 *
 *	This is done by bulkfree during a meta-data scan to ensure that
 *	all blocks still actually allocated by the filesystem are marked
 *	as such.
 *
 *	NOTE! Live filesystem transitions to POSSIBLY FREE can occur while
 *	      the bulkfree stage-2 and stage-3 are running.  The live filesystem
 *	      will use the alternative POSSIBLY FREE type (2) to prevent
 *	      stage-3 from improperly transitioning unvetted possibly-free
 *	      blocks to FREE.
 *
 * stage-3	POSSIBLY FREE (type 1) -> FREE	(scan freemap)
 *
 *	This is done by bulkfree to finalize POSSIBLY FREE states.
 *
 */
