/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 * $FreeBSD: head/usr.sbin/makefs/ffs/ffs_alloc.c 336736 2018-07-26 13:33:10Z emaste $
 */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <vfs/ufs/dinode.h>
#include <vfs/ufs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

#include "ffs.h"	/* XXX swildner: for compat defines */

static int scanc(u_int, const u_char *, const u_char *, int);

static makefs_daddr_t ffs_alloccg(struct inode *, int, makefs_daddr_t, int);
static makefs_daddr_t ffs_alloccgblk(struct inode *, struct m_buf *, makefs_daddr_t);
static makefs_daddr_t ffs_hashalloc(struct inode *, u_int, makefs_daddr_t, int,
		    makefs_daddr_t (*)(struct inode *, int, makefs_daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, makefs_daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
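/*
 * A minimal usage sketch (hypothetical values, with fs == ip->i_fs):
 * to allocate one full block for inode ip near preferred fs block 1234,
 *
 *	makefs_daddr_t bno;
 *	if (ffs_alloc(ip, lbn, (makefs_daddr_t)1234, fs->fs_bsize, &bno))
 *		errx(1, "out of space");
 *
 * On success bno holds the allocated block number and the inode's
 * block count has been credited.
 */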
int
ffs_alloc(struct inode *ip, makefs_daddr_t lbn __unused, makefs_daddr_t bpref, int size,
	makefs_daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	makefs_daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
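		/* Credit the inode's block count, kept in DEV_BSIZE units. */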
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
#ifndef __DragonFly__ /* XXX UFS2 */
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
#endif
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
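/*
 * The returned preference is a file system fragment number.  A worked
 * example with hypothetical geometry: fs_fpg * cg is the first fragment
 * of cylinder group cg, so with fs_fpg = 32768 and fs_frag = 8 the
 * preference for cg 3 is 3 * 32768 + 8 = 98312, i.e. one full block
 * past the start of the group.
 */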
/* XXX ondisk32 */
makefs_daddr_t
ffs_blkpref_ufs1(struct inode *ip, makefs_daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

#ifndef __DragonFly__ /* XXX UFS2 */
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
#endif

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static makefs_daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, makefs_daddr_t pref, int size,
    makefs_daddr_t (*allocator)(struct inode *, int, makefs_daddr_t, int))
{
	struct fs *fs;
	makefs_daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
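	/*
	 * The offsets i = 1, 2, 4, 8, ... accumulate, so starting from
	 * group icg the probes visit icg+1, icg+3, icg+7, icg+15, ...
	 * (mod fs_ncg).
	 */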
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static makefs_daddr_t
ffs_alloccg(struct inode *ip, int cg, makefs_daddr_t bpref, int size)
{
	struct cg *cgp;
	struct m_buf *bp;
	makefs_daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NULL, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
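	/*
	 * A free run of allocsiz fragments exists; take the first frags
	 * fragments of it and, if any are left over, credit the remaining
	 * run of allocsiz - frags fragments back to cg_frsum.
	 */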
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static makefs_daddr_t
ffs_alloccgblk(struct inode *ip, struct m_buf *bp, makefs_daddr_t bpref)
{
	struct cg *cgp;
	makefs_daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
#ifdef __DragonFly__ /* XXX swildner: our fsck checks these */
	cg_blks(fs, cgp, cbtocylno(fs, bno))[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cbtocylno(fs, bno)]--;
#endif
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
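/*
 * Usage sketch (hypothetical values, with fs == ip->i_fs): to release
 * the two leading fragments of the block at fs block bno,
 *
 *	ffs_blkfree(ip, bno, 2 * fs->fs_fsize);
 *
 * size is in bytes and must be a multiple of fs_fsize, at most fs_bsize.
 */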
void
ffs_blkfree(struct inode *ip, makefs_daddr_t bno, long size)
{
	struct cg *cgp;
	struct m_buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NULL, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
#ifdef __DragonFly__ /* XXX swildner: our fsck checks these */
		cg_blks(fs, cgp, cbtocylno(fs, bno))[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[cbtocylno(fs, bno)]++;
#endif
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
#ifdef __DragonFly__ /* XXX swildner: our fsck checks these */
			cg_blks(fs, cgp,
			    cbtocylno(fs, bbase))[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[cbtocylno(fs, bbase)]++;
#endif
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

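/*
 * Local replacement for the historic kernel scanc(): scan the size
 * bytes at cp for the first one whose table[] entry has a bit in
 * common with mask, returning the number of bytes left unscanned,
 * counting the matching byte (0 if no byte matched).
 */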
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, makefs_daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
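	/*
	 * fragtbl[fs->fs_frag] summarizes, per free-map byte, which
	 * fragment-run sizes the byte contains; the single-bit mask
	 * passed to scanc() selects bytes holding a run of allocsiz
	 * free fragments.
	 */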
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
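	/*
	 * Worked example for a free (cnt == 1): with a free run of 2
	 * behind and 3 ahead, the freed block joins them into one
	 * cluster of 2 + 3 + 1 = 6 (capped at fs_contigsumsize), so
	 * sump[6] gains one while sump[2] and sump[3] each lose one.
	 */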
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}