/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
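
/*
 * Usage sketch (illustrative only, with hypothetical caller state): a
 * block allocator such as the one in ffs_balloc.c obtains a new
 * full-sized block for a UFS1 file system roughly as follows:
 *
 *	daddr_t newb;
 *	error = ffs_alloc(ip, lbn,
 *	    ffs_blkpref_ufs1(ip, lbn, (int)lbn, &ip->i_ffs1_db[0]),
 *	    fs->fs_bsize, &newb);
 *	if (error)
 *		return (error);
 *
 * On success *bnp holds the fragment number of the new block and the
 * inode's block count has been charged.
 */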

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
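
/*
 * Preference example for the two routines above (hypothetical
 * numbers): with fs_frag = 8 and the previous block of the section
 * recorded at bap[indx - 1] = 1000, the contiguous case returns
 * 1000 + 8 = 1008, i.e. the fragment immediately following the
 * previous block, so sequential allocations stay physically
 * contiguous.
 */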

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
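
/*
 * Probe-order example (hypothetical numbers): with fs_ncg = 16 and a
 * starting group icg = 5, pass 2 tries groups 6, 8, 12 and 4
 * (icg + 1, + 2, + 4, + 8, mod fs_ncg), and pass 3 then sweeps
 * 7, 8, ... onward with wraparound, starting at icg + 2 because icg
 * itself and icg + 1 have already been probed.
 */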

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}
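
/*
 * Fragment summary example (hypothetical numbers): with fs_frag = 8,
 * a request for 3 fragments satisfied from an existing run of 5 free
 * fragments decrements cg_frsum[5] and increments cg_frsum[2] for the
 * 2-fragment run left behind, and the nffree counters drop by 3.
 */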

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
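
/*
 * Worked example (hypothetical numbers) for the return value above:
 * with fs_fpg = 32768, a block found at cg-relative fragment 1024 in
 * cylinder group 3 yields the file-system-relative fragment number
 * 3 * 32768 + 1024 = 99328.
 */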

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
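
/*
 * Reassembly example (hypothetical numbers): with fs_frag = 8, freeing
 * the last 2 fragments of a block whose other 6 fragments are already
 * free completes the block, so the nffree counters are first credited
 * with 2 and then rolled back by a full fs_frag of 8, while cs_nbfree
 * and the cluster map gain one block.
 */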
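
/*
 * Local stand-in for the kernel's scanc() primitive (a sketch of its
 * contract, inferred from the code below): scan `size' bytes starting
 * at `cp' and return the number of bytes remaining in the region,
 * counting the first byte for which table[*cp] & mask is non-zero; a
 * return of 0 means no byte matched.
 */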
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
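
/*
 * A note on the sifting loop above (a reading of the classic FFS
 * pattern tables, not new behaviour): `around' and `inside' come from
 * ffs_tables.c; for a given allocsiz, (blk & field) == subfield
 * recognises a suitable run of allocsiz free fragments at one
 * alignment within the block, and shifting field and subfield left
 * one bit per iteration tries each of the fs_frag - allocsiz + 1
 * possible alignments.
 */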

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
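
/*
 * Cluster summary example (hypothetical numbers, assuming
 * fs_contigsumsize >= 4): freeing a block (cnt == 1) that bridges a
 * 1-block free cluster behind it and a 2-block free cluster ahead of
 * it forms a 4-block cluster, so sump[4] is incremented while sump[1]
 * and sump[2] are decremented; the final loop then records in
 * fs_maxcluster[] the largest cluster size still present in this
 * group.
 */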