xref: /dragonfly/sys/vfs/hammer2/hammer2_freemap.c (revision 38b930d0)
1 /*
2  * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/fcntl.h>
39 #include <sys/buf.h>
40 #include <sys/proc.h>
41 #include <sys/namei.h>
42 #include <sys/mount.h>
43 #include <sys/vnode.h>
44 #include <sys/mountctl.h>
45 
46 #include "hammer2.h"
47 
48 struct hammer2_fiterate {
49 	hammer2_off_t	bpref;
50 	hammer2_off_t	bnext;
51 	int		loops;
52 };
53 
54 typedef struct hammer2_fiterate hammer2_fiterate_t;
55 
56 static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
57 			hammer2_chain_t **parentp, hammer2_blockref_t *bref,
58 			int radix, hammer2_fiterate_t *iter);
59 static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
60 			hammer2_key_t key, hammer2_chain_t *chain);
61 static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
62 			hammer2_bmap_data_t *bmap, uint16_t class,
63 			int n, int radix, hammer2_key_t *basep);
64 static int hammer2_freemap_iterate(hammer2_trans_t *trans,
65 			hammer2_chain_t **parentp, hammer2_chain_t **chainp,
66 			hammer2_fiterate_t *iter);
67 
68 static __inline
69 int
70 hammer2_freemapradix(int radix)
71 {
72 	return(radix);
73 }
74 
75 /*
76  * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
77  * bref.  Return a combined media offset and physical size radix.  Freemap
78  * chains use fixed storage offsets in the 4MB reserved area at the
79  * beginning of each 2GB zone.
80  *
81  * Rotate between four possibilities.  Theoretically this means we have three
82  * good freemaps in case of a crash, which we can use as a base for the fixup
83  * scan at mount-time.
84  */
85 #define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
86 #define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))
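
/*
 * Worked example (illustrative only; the radix value 10 below is an
 * arbitrary number chosen for the arithmetic, not a HAMMER2 constant):
 *
 *	H2FMSHIFT(10)         = (hammer2_off_t)1 << 10 = 0x400
 *	H2FMBASE(0x12345, 10) = 0x12345 & ~0x3ff       = 0x12000
 *
 * That is, H2FMSHIFT() yields the byte size of a radix-sized extent and
 * H2FMBASE() rounds a key down to the base of the extent containing it.
 */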
87 
88 static
89 int
90 hammer2_freemap_reserve(hammer2_trans_t *trans, hammer2_chain_t *chain,
91 			int radix)
92 {
93 	hammer2_blockref_t *bref = &chain->bref;
94 	hammer2_off_t off;
95 	size_t bytes;
96 
97 	/*
98 	 * Physical allocation size -> radix.  Typically either 256 for
99 	 * a level 0 freemap leaf or 65536 for a level N freemap node.
100 	 *
101 	 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
102 	 *	 Do not use hammer2_allocsize() here as it has a min cap.
103 	 */
104 	bytes = 1 << radix;
105 
106 	/*
107 	 * Adjust by HAMMER2_ZONE_FREEMAP_{A,B,C,D} using the existing
108 	 * offset as a basis.  Start in zone A if previously unallocated.
109 	 */
110 #if 0
111 	kprintf("trans %04jx/%08x freemap chain %p.%d [%08x] %016jx/%d %016jx",
112 		trans->sync_tid, trans->flags,
113 		chain, chain->bref.type, chain->flags,
114 		chain->bref.key, chain->bref.keybits,
115 		bref->data_off);
116 #endif
117 	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
118 		off = HAMMER2_ZONE_FREEMAP_A;
119 	} else {
120 		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
121 		      (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
122 		off = off / HAMMER2_PBUFSIZE;
123 		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_A);
124 		KKASSERT(off < HAMMER2_ZONE_FREEMAP_D + 4);
125 	}
126 
127 	if ((trans->flags &
128 	     (HAMMER2_TRANS_ISFLUSH | HAMMER2_TRANS_ISALLOCATING)) ==
129 	    HAMMER2_TRANS_ISFLUSH) {
130 		/*
131 		 * Delete-Duplicates while flushing the fchain topology
132 		 * itself.
133 		 */
134 #if 0
135 		kprintf(" flush ");
136 #endif
137 		if (off >= HAMMER2_ZONE_FREEMAP_D)
138 			off = HAMMER2_ZONE_FREEMAP_B;
139 		else if (off >= HAMMER2_ZONE_FREEMAP_C)
140 			off = HAMMER2_ZONE_FREEMAP_A;
141 		else if (off >= HAMMER2_ZONE_FREEMAP_B)
142 			off = HAMMER2_ZONE_FREEMAP_D;
143 		else
144 			off = HAMMER2_ZONE_FREEMAP_C;
145 	} else {
146 		/*
147 		 * Allocations from the freemap via a normal transaction
148 		 * or a flush whose sync_tid has been bumped (so effectively
149 		 * done as a normal transaction).
150 		 */
151 #if 0
152 		kprintf(" alloc ");
153 #endif
154 		if (off >= HAMMER2_ZONE_FREEMAP_D)
155 			off = HAMMER2_ZONE_FREEMAP_A;
156 		else if (off >= HAMMER2_ZONE_FREEMAP_C)
157 			off = HAMMER2_ZONE_FREEMAP_D;
158 		else if (off >= HAMMER2_ZONE_FREEMAP_B)
159 			off = HAMMER2_ZONE_FREEMAP_C;
160 		else
161 			off = HAMMER2_ZONE_FREEMAP_B;
162 	}
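
	/*
	 * Note (added for clarity): the net effect of the two cases above
	 * is a rotation A->B->C->D->A for normal allocations, while the
	 * flush case maps A->C, B->D, C->A, D->B (i.e. it swaps the A/C
	 * and B/D pairs).
	 */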
163 
164 
165 	off = off * HAMMER2_PBUFSIZE;
166 
167 	/*
168 	 * Calculate the block offset of the reserved block.  This will
169 	 * point into the 4MB reserved area at the base of the appropriate
170 	 * 2GB zone, once added to the FREEMAP_x selection above.
171 	 */
172 	switch(bref->keybits) {
173 	/* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
174 	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 2EB */
175 		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
176 		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
177 		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
178 		       HAMMER2_ZONEFM_LEVEL4 * HAMMER2_PBUFSIZE;
179 		break;
180 	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 2PB */
181 		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
182 		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
183 		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
184 		       HAMMER2_ZONEFM_LEVEL3 * HAMMER2_PBUFSIZE;
185 		break;
186 	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 2TB */
187 		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
188 		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
189 		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
190 		       HAMMER2_ZONEFM_LEVEL2 * HAMMER2_PBUFSIZE;
191 		break;
192 	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 2GB */
193 		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
194 		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
195 		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
196 		       HAMMER2_ZONEFM_LEVEL1 * HAMMER2_PBUFSIZE;
197 		break;
198 	default:
199 		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
200 		/* NOT REACHED */
201 		break;
202 	}
203 	bref->data_off = off | radix;
204 #if 0
205 	kprintf("-> %016jx\n", bref->data_off);
206 #endif
207 	return (0);
208 }
209 
210 /*
211  * Normal freemap allocator
212  *
213  * Use available hints to allocate space using the freemap.  Create missing
214  * freemap infrastructure on-the-fly as needed (including marking initial
215  * allocations using the iterator as allocated, instantiating new 2GB zones,
216  * and dealing with the end-of-media edge case).
217  *
218  * ip and bpref are only used as a heuristic to determine locality of
219  * reference.  bref->key may also be used heuristically.
220  *
221  * WARNING! When called from a flush we have to use the 'live' sync_tid
222  *	    and not the flush sync_tid.  The live sync_tid is the flush
223  *	    sync_tid + 1.  That is, freemap allocations which occur during
224  *	    a flush are not part of the flush.  Crash-recovery will restore
225  *	    any lost allocations.
226  */
227 int
228 hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain,
229 		      size_t bytes)
230 {
231 	hammer2_mount_t *hmp = chain->hmp;
232 	hammer2_blockref_t *bref = &chain->bref;
233 	hammer2_chain_t *parent;
234 	int radix;
235 	int error;
236 	unsigned int hindex;
237 	hammer2_fiterate_t iter;
238 
239 	/*
240 	 * Validate the allocation size.  It must be a power of 2.
241 	 *
242 	 * For now require that the caller be aware of the minimum
243 	 * allocation (1K).
244 	 */
245 	radix = hammer2_getradix(bytes);
246 	KKASSERT((size_t)1 << radix == bytes);
247 
248 	/*
249 	 * Freemap blocks themselves are simply assigned from the reserve
250 	 * area, not allocated from the freemap.
251 	 */
252 	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
253 	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
254 		return (hammer2_freemap_reserve(trans, chain, radix));
255 	}
256 
257 	/*
258 	 * Mark previously allocated block as possibly freeable.  There might
259 	 * be snapshots and other races so we can't just mark it fully free.
260 	 * (XXX optimize this for the current-transaction create+delete case)
261 	 */
262 	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX) {
263 		hammer2_freemap_adjust(trans, hmp, bref,
264 				       HAMMER2_FREEMAP_DOMAYFREE);
265 	}
266 
267 	/*
268 	 * Setting ISALLOCATING ensures correct operation even when the
269 	 * flusher itself is making allocations.
270 	 */
271 	KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC);
272 	KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
273 	atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
274 	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
275 		++trans->sync_tid;
276 
277 	/*
278 	 * Calculate the starting point for our allocation search.
279 	 *
280 	 * Each freemap leaf is dedicated to a specific freemap_radix.
281 	 * The freemap_radix can be more fine-grained than the device buffer
282 	 * radix, which results in inodes being grouped together in their
283 	 * own segment, terminal-data (16K or less) and initial indirect
284 	 * block being grouped together, and then full-indirect and full-data
285 	 * blocks (64K) being grouped together.
286 	 *
287 	 * The single most important aspect of this is the inode grouping
288 	 * because that is what allows 'find' and 'ls' and other filesystem
289 	 * topology operations to run fast.
290 	 */
291 #if 0
292 	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
293 		bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
294 	else if (trans->tmp_bpref)
295 		bpref = trans->tmp_bpref;
296 	else if (trans->tmp_ip)
297 		bpref = trans->tmp_ip->chain->bref.data_off;
298 	else
299 #endif
300 	/*
301 	 * Heuristic tracking index.  We would like one for each distinct
302 	 * bref type if possible.  heur_freemap[] has room for two classes
303 	 * for each type.  At a minimum we have to break up our heuristic
304 	 * by device block sizes.
305 	 */
306 	hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
307 	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
308 	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
309 	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
310 	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);
311 
312 	iter.bpref = hmp->heur_freemap[hindex];
313 
314 	/*
315 	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
316 	 * reserved area; the try code will iterate past it.
317 	 */
318 	if (iter.bpref > hmp->voldata.volu_size)
319 		iter.bpref = hmp->voldata.volu_size - 1;
320 
321 	/*
322 	 * Iterate the freemap looking for free space before and after.
323 	 */
324 	parent = &hmp->fchain;
325 	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
326 	error = EAGAIN;
327 	iter.bnext = iter.bpref;
328 	iter.loops = 0;
329 
330 	while (error == EAGAIN) {
331 		error = hammer2_freemap_try_alloc(trans, &parent, bref,
332 						  radix, &iter);
333 	}
334 	hmp->heur_freemap[hindex] = iter.bnext;
335 	hammer2_chain_unlock(parent);
336 
337 	atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
338 	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
339 		--trans->sync_tid;
340 
341 	return (error);
342 }
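
#if 0
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * needs backing store for a chain would typically invoke the allocator as
 * below.  'trans' and 'chain' are assumed to have been prepared by the
 * caller; example_alloc_backing() is a hypothetical name.
 */
static void
example_alloc_backing(hammer2_trans_t *trans, hammer2_chain_t *chain,
		      size_t nbytes)
{
	int error;

	/*
	 * nbytes must be a power of 2 within
	 * [HAMMER2_MIN_ALLOC, HAMMER2_MAX_ALLOC]; the allocator asserts
	 * this.
	 */
	error = hammer2_freemap_alloc(trans, chain, nbytes);
	if (error)
		kprintf("freemap example: allocation failed (%d)\n", error);
}
#endif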
343 
344 static int
345 hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
346 			  hammer2_blockref_t *bref, int radix,
347 			  hammer2_fiterate_t *iter)
348 {
349 	hammer2_mount_t *hmp = (*parentp)->hmp;
350 	hammer2_off_t l0size;
351 	hammer2_off_t l1size;
352 	hammer2_off_t l1mask;
353 	hammer2_key_t key_dummy;
354 	hammer2_chain_t *chain;
355 	hammer2_off_t key;
356 	size_t bytes;
357 	uint16_t class;
358 	int error = 0;
359 	int cache_index = -1;
360 
361 
362 	/*
363 	 * Calculate the number of bytes being allocated, the number
364 	 * of contiguous bits of bitmap being allocated, and the bitmap
365 	 * mask.
366 	 *
367 	 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
368 	 *	    mask calculation.
369 	 */
370 	bytes = (size_t)1 << radix;
371 	class = (bref->type << 8) | hammer2_devblkradix(radix);
372 
373 	/*
374 	 * Lookup the level1 freemap chain, creating and initializing one
375 	 * if necessary.  Intermediate levels will be created automatically
376 	 * when necessary by hammer2_chain_create().
377 	 */
378 	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
379 	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
380 	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
381 	l1mask = l1size - 1;
382 
383 	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
384 				     &cache_index,
385 				     HAMMER2_LOOKUP_FREEMAP |
386 				     HAMMER2_LOOKUP_ALWAYS |
387 				     HAMMER2_LOOKUP_MATCHIND);
388 
389 	if (chain == NULL) {
390 		/*
391 		 * Create the missing leaf; be sure to initialize
392 		 * the auxiliary freemap tracking information in
393 		 * the bref.check.freemap structure.
394 		 */
395 #if 0
396 		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
397 			key, iter->bpref);
398 #endif
399 		error = hammer2_chain_create(trans, parentp, &chain,
400 				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
401 				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
402 				     HAMMER2_FREEMAP_LEVELN_PSIZE);
403 		if (error == 0) {
404 			hammer2_chain_modify(trans, &chain, 0);
405 			bzero(&chain->data->bmdata[0],
406 			      HAMMER2_FREEMAP_LEVELN_PSIZE);
407 			chain->bref.check.freemap.bigmask = (uint32_t)-1;
408 			chain->bref.check.freemap.avail = l1size;
409 			/* bref.methods should already be inherited */
410 
411 			hammer2_freemap_init(trans, hmp, key, chain);
412 		}
413 	} else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) {
414 		/*
415 		 * Already flagged as not having enough space
416 		 */
417 		error = ENOSPC;
418 	} else {
419 		/*
420 		 * Modify existing chain to setup for adjustment.
421 		 */
422 		hammer2_chain_modify(trans, &chain, 0);
423 	}
424 
425 	/*
426 	 * Scan 2MB entries.
427 	 */
428 	if (error == 0) {
429 		hammer2_bmap_data_t *bmap;
430 		hammer2_key_t base_key;
431 		int count;
432 		int start;
433 		int n;
434 
435 		KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
436 		start = (int)((iter->bnext - key) >>
437 			      HAMMER2_FREEMAP_LEVEL0_RADIX);
438 		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
439 		hammer2_chain_modify(trans, &chain, 0);
440 
441 		error = ENOSPC;
442 		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
443 			if (start + count >= HAMMER2_FREEMAP_COUNT &&
444 			    start - count < 0) {
445 				break;
446 			}
447 			n = start + count;
448 			bmap = &chain->data->bmdata[n];
449 			if (n < HAMMER2_FREEMAP_COUNT && bmap->avail &&
450 			    (bmap->class == 0 || bmap->class == class)) {
451 				base_key = key + n * l0size;
452 				error = hammer2_bmap_alloc(trans, hmp, bmap,
453 							   class, n, radix,
454 							   &base_key);
455 				if (error != ENOSPC) {
456 					key = base_key;
457 					break;
458 				}
459 			}
460 			n = start - count;
461 			bmap = &chain->data->bmdata[n];
462 			if (n >= 0 && bmap->avail &&
463 			    (bmap->class == 0 || bmap->class == class)) {
464 				base_key = key + n * l0size;
465 				error = hammer2_bmap_alloc(trans, hmp, bmap,
466 							   class, n, radix,
467 							   &base_key);
468 				if (error != ENOSPC) {
469 					key = base_key;
470 					break;
471 				}
472 			}
473 		}
474 		if (error == ENOSPC)
475 			chain->bref.check.freemap.bigmask &= ~(1 << radix);
476 		/* XXX also scan down from original count */
477 	}
478 
479 	if (error == 0) {
480 		/*
481 		 * Assert validity.  Must be beyond the static allocator used
482 		 * by newfs_hammer2 (and thus also beyond the aux area),
483 		 * must not go past the volume size, and must not be in the
484 		 * reserved segment area for a zone.
485 		 */
486 		KKASSERT(key >= hmp->voldata.allocator_beg &&
487 			 key + bytes <= hmp->voldata.volu_size);
488 		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
489 		bref->data_off = key | radix;
490 
491 #if 0
492 		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
493 			chain,
494 			bref->key, bref->data_off, chain->bref.data_off);
495 #endif
496 	} else if (error == ENOSPC) {
497 		/*
498 		 * Return EAGAIN with next iteration in iter->bnext, or
499 		 * return ENOSPC if the allocation map has been exhausted.
500 		 */
501 		error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
502 	}
503 
504 	/*
505 	 * Cleanup
506 	 */
507 	if (chain)
508 		hammer2_chain_unlock(chain);
509 	return (error);
510 }
511 
512 /*
513  * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
514  *
515  * If the linear iterator is mid-block we use it directly (the bitmap should
516  * already be marked allocated), otherwise we search for a block in the bitmap
517  * that fits the allocation request.
518  *
519  * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
520  * to fully allocated and adjusts the linear allocator to allow the
521  * remaining space to be allocated.
522  */
523 static
524 int
525 hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
526 		   hammer2_bmap_data_t *bmap,
527 		   uint16_t class, int n, int radix, hammer2_key_t *basep)
528 {
529 	hammer2_io_t *dio;
530 	size_t size;
531 	size_t bsize;
532 	int bmradix;
533 	uint32_t bmmask;
534 	int offset;
535 	int error;
536 	int i;
537 	int j;
538 
539 	/*
540 	 * Take into account 2-bits per block when calculating bmradix.
541 	 */
542 	size = (size_t)1 << radix;
543 
544 	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
545 		bmradix = 2;
546 		bsize = HAMMER2_FREEMAP_BLOCK_SIZE;
547 		/* (16K) 2 bits per allocation block */
548 	} else {
549 		bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
550 		bsize = size;
551 		/* (32K-256K) 4, 8, 16, 32 bits per allocation block */
552 	}
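
	/*
	 * Worked example (added for clarity; assumes the 16KB freemap
	 * block noted in the comments above, i.e. a
	 * HAMMER2_FREEMAP_BLOCK_RADIX of 14): a 64KB allocation
	 * (radix 16) yields bmradix = 2 << (16 - 14) = 8, i.e. eight
	 * bitmap bits (four 2-bit pairs) covering four 16KB blocks.
	 */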
553 
554 	/*
555 	 * Use the linear iterator to pack small allocations, otherwise
556 	 * fall back to finding a free 16KB chunk.  The linear iterator
557 	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
558 	 * If it is, the bitmap must be scanned.  It can become invalid
559 	 * once we pack to the boundary.  We adjust it after a bitmap
560 	 * allocation only for sub-16KB allocations (so the perfectly good
561 	 * previous value can still be used for fragments when 16KB+
562 	 * allocations are made).
563 	 *
564 	 * Beware of hardware artifacts when bmradix == 32 (intermediate
565 	 * result can wind up being '1' instead of '0' if hardware masks
566 	 * bit-count & 31).
567 	 *
568 	 * NOTE: j needs to be even in the j= calculation.  As an artifact
569 	 *	 of the /2 division, our bitmask has to clear bit 0.
570 	 *
571 	 * NOTE: TODO this can leave little unallocatable fragments lying
572 	 *	 around.
573 	 */
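	/*
	 * Worked example of the linear path (illustrative only; assumes
	 * the 16KB freemap block and 2MB segment sizes noted elsewhere
	 * in this file): with bmap->linear == 0x4800 and a 2KB request,
	 * the request packs into the partially used 16KB block at
	 * offset 0x4800 and bmap->linear advances to 0x5000.
	 */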
574 	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
575 	    HAMMER2_FREEMAP_BLOCK_SIZE &&
576 	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
577 	    bmap->linear < HAMMER2_SEGSIZE) {
578 		KKASSERT(bmap->linear >= 0 &&
579 			 bmap->linear + size <= HAMMER2_SEGSIZE &&
580 			 (bmap->linear & (HAMMER2_MIN_ALLOC - 1)) == 0);
581 		offset = bmap->linear;
582 		i = offset / (HAMMER2_SEGSIZE / 8);
583 		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
584 		bmmask = (bmradix == 32) ?
585 			 0xFFFFFFFFU : (1 << bmradix) - 1;
586 		bmmask <<= j;
587 		bmap->linear = offset + size;
588 	} else {
589 		for (i = 0; i < 8; ++i) {
590 			bmmask = (bmradix == 32) ?
591 				 0xFFFFFFFFU : (1 << bmradix) - 1;
592 			for (j = 0; j < 32; j += bmradix) {
593 				if ((bmap->bitmap[i] & bmmask) == 0)
594 					goto success;
595 				bmmask <<= bmradix;
596 			}
597 		}
598 		/* fragments might remain */
599 		/*KKASSERT(bmap->avail == 0);*/
600 		return (ENOSPC);
601 success:
602 		offset = i * (HAMMER2_SEGSIZE / 8) +
603 			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
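		/*
		 * Note (added for clarity): with the 2MB segment and 16KB
		 * freemap block sizes noted in the comments, i selects one
		 * of eight 32-bit bitmap words (each covering 256KB) and j
		 * selects a 2-bit pair within that word, e.g. i = 3, j = 6
		 * corresponds to offset 3 * 256KB + 6 * 8KB = 816KB within
		 * the 2MB swath.
		 */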
604 		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
605 			bmap->linear = offset + size;
606 	}
607 
608 	KKASSERT(i >= 0 && i < 8);	/* 8 x 16 -> 128 x 16K -> 2MB */
609 
610 	/*
611 	 * Optimize the buffer cache to avoid unnecessary read-before-write
612 	 * operations.
613 	 *
614 	 * The device block size could be larger than the allocation size
615 	 * so the actual bitmap test is somewhat more involved.  We have
616 	 * to use a compatible buffer size for this operation.
617 	 */
618 	if ((bmap->bitmap[i] & bmmask) == 0 &&
619 	    hammer2_devblksize(size) != size) {
620 		size_t psize = hammer2_devblksize(size);
621 		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
622 		int pbmradix = 2 << (hammer2_devblkradix(radix) -
623 				     HAMMER2_FREEMAP_BLOCK_RADIX);
624 		uint32_t pbmmask;
625 		int pradix = hammer2_getradix(psize);
626 
627 		pbmmask = (pbmradix == 32) ? 0xFFFFFFFFU : (1 << pbmradix) - 1;
628 		while ((pbmmask & bmmask) == 0)
629 			pbmmask <<= pbmradix;
630 
631 #if 0
632 		kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n",
633 			*basep + offset, bmap->bitmap[i],
634 			pbmmask, bmmask, size, psize);
635 #endif
636 
637 		if ((bmap->bitmap[i] & pbmmask) == 0) {
638 			error = hammer2_io_newq(hmp,
639 						(*basep + (offset & ~pmask)) |
640 						 pradix,
641 						psize, &dio);
642 			hammer2_io_bqrelse(&dio);
643 		}
644 	}
645 
646 #if 0
647 	/*
648 	 * When initializing a new inode segment also attempt to initialize
649 	 * an adjacent segment.  Be careful not to index beyond the array
650 	 * bounds.
651 	 *
652 	 * We do this to try to localize inode accesses to improve
653 	 * directory scan rates.  XXX doesn't improve scan rates.
654 	 */
655 	if (size == HAMMER2_INODE_BYTES) {
656 		if (n & 1) {
657 			if (bmap[-1].radix == 0 && bmap[-1].avail)
658 				bmap[-1].radix = radix;
659 		} else {
660 			if (bmap[1].radix == 0 && bmap[1].avail)
661 				bmap[1].radix = radix;
662 		}
663 	}
664 #endif
665 
666 	/*
667 	 * Adjust the linear iterator, set the radix if necessary (might as
668 	 * well just set it unconditionally), adjust *basep to return the
669 	 * allocated data offset.
670 	 */
671 	bmap->bitmap[i] |= bmmask;
672 	bmap->class = class;
673 	bmap->avail -= size;
674 	*basep += offset;
675 
676 	hammer2_voldata_lock(hmp);
677 	hmp->voldata.allocator_free -= size;  /* XXX */
678 	hammer2_voldata_unlock(hmp, 1);
679 
680 	return(0);
681 }
682 
683 static
684 void
685 hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
686 		     hammer2_key_t key, hammer2_chain_t *chain)
687 {
688 	hammer2_off_t l1size;
689 	hammer2_off_t lokey;
690 	hammer2_off_t hikey;
691 	hammer2_bmap_data_t *bmap;
692 	int count;
693 
694 	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
695 
696 	/*
697 	 * Calculate the portion of the 2GB map that should be initialized
698 	 * as free.  Portions below or after will be initialized as allocated.
699 	 * SEGMASK-align the areas so we don't have to worry about sub-scans
700 	 * or endianness when using memset.
701 	 *
702 	 * (1) Ensure that all statically allocated space from newfs_hammer2
703 	 *     is marked allocated.
704 	 *
705 	 * (2) Ensure that the reserved area is marked allocated (typically
706 	 *     the first 4MB of the 2GB area being represented).
707 	 *
708 	 * (3) Ensure that any trailing space at the end-of-volume is marked
709 	 *     allocated.
710 	 *
711 	 * WARNING! It is possible for lokey to be larger than hikey if the
712 	 *	    entire 2GB segment is within the static allocation.
713 	 */
714 	lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
715 		~HAMMER2_SEGMASK64;
716 
717 	if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
718 		  HAMMER2_ZONE_SEG64) {
719 		lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
720 			HAMMER2_ZONE_SEG64;
721 	}
722 
723 	hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
724 	if (hikey > hmp->voldata.volu_size) {
725 		hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
726 	}
727 
728 	chain->bref.check.freemap.avail =
729 		H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
730 	bmap = &chain->data->bmdata[0];
731 
732 	for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
733 		if (key < lokey || key >= hikey) {
734 			memset(bmap->bitmap, -1,
735 			       sizeof(bmap->bitmap));
736 			bmap->avail = 0;
737 			bmap->linear = HAMMER2_SEGSIZE;
738 			chain->bref.check.freemap.avail -=
739 				H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
740 		} else {
741 			bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
742 		}
743 		key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
744 		++bmap;
745 	}
746 }
747 
748 /*
749  * The current Level 1 freemap has been exhausted; iterate to the next
750  * one and return ENOSPC if no freemaps remain.
751  *
752  * XXX this should rotate back to the beginning to handle freed-up space
753  * XXX or use intermediate entries to locate free space. TODO
754  */
755 static int
756 hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
757 			hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
758 {
759 	hammer2_mount_t *hmp = (*parentp)->hmp;
760 
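	/*
	 * Round bnext down to its level-1 boundary and advance to the
	 * next level-1 extent.  Illustration (added for clarity; assumes
	 * the 2GB level-1 extents described above): a bnext of
	 * 0x123456789 rounds down to 0x100000000 and advances to
	 * 0x180000000.
	 */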
761 	iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
762 	iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
763 	if (iter->bnext >= hmp->voldata.volu_size) {
764 		iter->bnext = 0;
765 		if (++iter->loops == 2)
766 			return (ENOSPC);
767 	}
768 	return(EAGAIN);
769 }
770 
771 /*
772  * Free the specified blockref.  This code is only able to fully free
773  * blocks when (how) is non-zero; otherwise the block is marked for
774  * the bulk freeing pass to check.
775  *
776  * Normal use is to only mark inodes as possibly being free.  The underlying
777  * file blocks are not necessarily marked.  The bulk freescan can
778  * theoretically handle the case.
779  *
780  * XXX currently disabled when how == 0 (the normal real-time case).  At
781  * the moment we depend on the bulk freescan to actually free blocks.  It
782  * will still call this routine with a non-zero how to stage possible frees
783  * and to do the actual free.
784  *
785  * WARNING! When called from a flush we have to use the 'live' sync_tid
786  *	    and not the flush sync_tid.  The live sync_tid is the flush
787  *	    sync_tid + 1.  That is, freemap allocations which occur during
788  *	    a flush are not part of the flush.  Crash-recovery will restore
789  *	    any lost allocations.
790  */
791 void
792 hammer2_freemap_adjust(hammer2_trans_t *trans, hammer2_mount_t *hmp,
793 		       hammer2_blockref_t *bref, int how)
794 {
795 	hammer2_off_t data_off = bref->data_off;
796 	hammer2_chain_t *chain;
797 	hammer2_chain_t *parent;
798 	hammer2_bmap_data_t *bmap;
799 	hammer2_key_t key;
800 	hammer2_key_t key_dummy;
801 	hammer2_off_t l0size;
802 	hammer2_off_t l1size;
803 	hammer2_off_t l1mask;
804 	uint32_t *bitmap;
805 	const uint32_t bmmask00 = 0;
806 	uint32_t bmmask01;
807 	uint32_t bmmask10;
808 	uint32_t bmmask11;
809 	size_t bytes;
810 	uint16_t class;
811 	int radix;
812 	int start;
813 	int count;
814 	int modified = 0;
815 	int cache_index = -1;
816 	int error;
817 
818 	radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
819 	data_off &= ~HAMMER2_OFF_MASK_RADIX;
820 	KKASSERT(radix <= HAMMER2_MAX_RADIX);
821 
822 	bytes = (size_t)1 << radix;
823 	class = (bref->type << 8) | hammer2_devblkradix(radix);
824 
825 	/*
826 	 * We can't adjust the freemap for data allocations made by
827 	 * newfs_hammer2.
828 	 */
829 	if (data_off < hmp->voldata.allocator_beg)
830 		return;
831 
832 	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
833 	KKASSERT((trans->flags & HAMMER2_TRANS_ISALLOCATING) == 0);
834 	atomic_set_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
835 	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
836 		++trans->sync_tid;
837 
838 	/*
839 	 * Lookup the level1 freemap chain.  The chain must exist.
840 	 */
841 	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
842 	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
843 	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
844 	l1mask = l1size - 1;
845 
846 	parent = &hmp->fchain;
847 	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
848 
849 	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
850 				     &cache_index,
851 				     HAMMER2_LOOKUP_FREEMAP |
852 				     HAMMER2_LOOKUP_ALWAYS |
853 				     HAMMER2_LOOKUP_MATCHIND);
854 
855 	/*
856 	 * Stop early if we are trying to free something but no leaf exists.
857 	 */
858 	if (chain == NULL && how != HAMMER2_FREEMAP_DORECOVER) {
859 		kprintf("hammer2_freemap_adjust: %016jx: no chain\n",
860 			(intmax_t)bref->data_off);
861 		goto done;
862 	}
863 
864 	/*
865 	 * Create any missing leaf(s) if we are doing a recovery (marking
866 	 * the block(s) as being allocated instead of being freed).  Be sure
867 	 * to initialize the auxiliary freemap tracking info in the
868 	 * bref.check.freemap structure.
869 	 */
870 	if (chain == NULL && how == HAMMER2_FREEMAP_DORECOVER) {
871 		error = hammer2_chain_create(trans, &parent, &chain,
872 				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
873 				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
874 				     HAMMER2_FREEMAP_LEVELN_PSIZE);
875 		kprintf("fixup create chain %p %016jx:%d\n", chain, chain->bref.key, chain->bref.keybits);
876 
877 		if (error == 0) {
878 			hammer2_chain_modify(trans, &chain, 0);
879 			bzero(&chain->data->bmdata[0],
880 			      HAMMER2_FREEMAP_LEVELN_PSIZE);
881 			chain->bref.check.freemap.bigmask = (uint32_t)-1;
882 			chain->bref.check.freemap.avail = l1size;
883 			/* bref.methods should already be inherited */
884 
885 			hammer2_freemap_init(trans, hmp, key, chain);
886 		}
887 		/* XXX handle error */
888 	}
889 
890 	/*
891 	 * Calculate the bitmask (runs in 2-bit pairs).
892 	 */
893 	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
894 	bmmask01 = 1 << start;
895 	bmmask10 = 2 << start;
896 	bmmask11 = 3 << start;
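
	/*
	 * Worked example (added for clarity; assumes the 16KB freemap
	 * block noted earlier, i.e. a HAMMER2_FREEMAP_BLOCK_RADIX of 14):
	 * a data_off whose 16KB-block index within its 256KB bitmap word
	 * is 5 gives start = 10, bmmask01 = 0x00000400,
	 * bmmask10 = 0x00000800 and bmmask11 = 0x00000c00.
	 */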
897 
898 	/*
899 	 * Fixup the bitmap.  Partial blocks cannot be fully freed unless
900 	 * a bulk scan is able to roll them up.
901 	 */
902 	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
903 		count = 1;
904 		if (how == HAMMER2_FREEMAP_DOREALFREE)
905 			how = HAMMER2_FREEMAP_DOMAYFREE;
906 	} else {
907 		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
908 	}
909 
910 	/*
911 	 * [re]load the bmap and bitmap pointers.  Each bmap entry covers
912 	 * a 2MB swath.  The bmap itself (LEVEL1) covers 2GB.
913 	 */
914 again:
915 	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
916 				    (HAMMER2_FREEMAP_COUNT - 1)];
917 	bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];
918 
919 
920 	while (count) {
921 		KKASSERT(bmmask11);
922 		if (how == HAMMER2_FREEMAP_DORECOVER) {
923 			/*
924 			 * Recovery request, mark as allocated.
925 			 */
926 			if ((*bitmap & bmmask11) != bmmask11) {
927 				if (modified == 0) {
928 					hammer2_chain_modify(trans, &chain, 0);
929 					modified = 1;
930 					goto again;
931 				}
932 				if ((*bitmap & bmmask11) == bmmask00)
933 					bmap->avail -= 1 << radix;
934 				if (bmap->class == 0)
935 					bmap->class = class;
936 				*bitmap |= bmmask11;
937 				kprintf("hammer2_freemap_recover: fixup "
938 					"type=%02x block=%016jx/%zd\n",
939 					bref->type, data_off, bytes);
940 			} else {
941 				/*
942 				kprintf("hammer2_freemap_recover:  good "
943 					"type=%02x block=%016jx/%zd\n",
944 					bref->type, data_off, bytes);
945 				*/
946 			}
947 		} else if ((*bitmap & bmmask11) == bmmask11) {
948 			/*
949 			 * Mayfree/Realfree request and bitmap is currently
950 			 * marked as being fully allocated.
951 			 */
952 			if (!modified) {
953 				hammer2_chain_modify(trans, &chain, 0);
954 				modified = 1;
955 				goto again;
956 			}
957 			if (how == HAMMER2_FREEMAP_DOREALFREE)
958 				*bitmap &= ~bmmask11;
959 			else
960 				*bitmap = (*bitmap & ~bmmask11) | bmmask10;
961 		} else if ((*bitmap & bmmask11) == bmmask10) {
962 			/*
963 			 * Mayfree/Realfree request and bitmap is currently
964 			 * marked as being possibly freeable.
965 			 */
966 			if (how == HAMMER2_FREEMAP_DOREALFREE) {
967 				if (!modified) {
968 					hammer2_chain_modify(trans, &chain, 0);
969 					modified = 1;
970 					goto again;
971 				}
972 				*bitmap &= ~bmmask11;
973 			}
974 		} else {
975 			/*
976 			 * 01 - Not implemented, currently illegal state
977 			 * 00 - Not allocated at all, illegal free.
978 			 */
979 			panic("hammer2_freemap_adjust: "
980 			      "Illegal state %08x(%08x)",
981 			      *bitmap, *bitmap & bmmask11);
982 		}
983 		--count;
984 		bmmask01 <<= 2;
985 		bmmask10 <<= 2;
986 		bmmask11 <<= 2;
987 	}
988 	if (how == HAMMER2_FREEMAP_DOREALFREE && modified) {
989 		bmap->avail += 1 << radix;
990 		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
991 		if (bmap->avail == HAMMER2_SEGSIZE &&
992 		    bmap->bitmap[0] == 0 &&
993 		    bmap->bitmap[1] == 0 &&
994 		    bmap->bitmap[2] == 0 &&
995 		    bmap->bitmap[3] == 0 &&
996 		    bmap->bitmap[4] == 0 &&
997 		    bmap->bitmap[5] == 0 &&
998 		    bmap->bitmap[6] == 0 &&
999 		    bmap->bitmap[7] == 0) {
1000 			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
1001 			kprintf("Freeseg %016jx\n", (intmax_t)key);
1002 			bmap->class = 0;
1003 		}
1004 	}
1005 
1006 	/*
1007 	 * chain->bref.check.freemap.bigmask (XXX)
1008 	 *
1009 	 * Setting bigmask is a hint to the allocation code that there might
1010 	 * be something allocatable.  We also set this in recovery... it
1011 	 * doesn't hurt and we might want to use the hint for other validation
1012 	 * operations later on.
1013 	 */
1014 	if (modified)
1015 		chain->bref.check.freemap.bigmask |= 1 << radix;
1016 
1017 	hammer2_chain_unlock(chain);
1018 done:
1019 	hammer2_chain_unlock(parent);
1020 	atomic_clear_int(&trans->flags, HAMMER2_TRANS_ISALLOCATING);
1021 	if (trans->flags & HAMMER2_TRANS_ISFLUSH)
1022 		--trans->sync_tid;
1023 }
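
#if 0
/*
 * Usage sketch (illustrative only, not part of the driver): staging a
 * block for the bulk free scan typically looks like the call below.
 * 'trans', 'hmp' and 'bref' are assumed to describe an already-allocated
 * block; example_mark_mayfree() is a hypothetical name.
 */
static void
example_mark_mayfree(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		     hammer2_blockref_t *bref)
{
	/*
	 * DOMAYFREE only marks the block as possibly freeable; the bulk
	 * free scan (or a later DOREALFREE) actually releases it.
	 */
	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
		hammer2_freemap_adjust(trans, hmp, bref,
				       HAMMER2_FREEMAP_DOMAYFREE);
}
#endif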
1024