/*	$NetBSD: ffs_alloc.c,v 1.9 2002/02/06 15:36:30 lukem Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.9 2002/02/06 15:36:30 lukem Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"


static int scanc(u_int, const u_char *, const u_char *, int);

static ufs_daddr_t ffs_alloccg(struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_alloccgblk(struct inode *, struct buf *, ufs_daddr_t);
static u_long ffs_hashalloc(struct inode *, int, long, int,
		    ufs_daddr_t (*)(struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs_daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size,
    ufs_daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	ufs_daddr_t bno;
	int cg;

	*bnp = 0;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    ffs_alloccg);
	if (bno > 0) {
		ip->i_ffs_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
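
/*
 * Illustrative sketch (not part of the original file): the size check
 * in ffs_alloc() above requires a request to be a positive multiple of
 * fs_fsize and at most fs_bsize.  The helper below restates that test
 * with plain integers for an assumed 8192/1024 (bsize/fsize) file
 * system; the function name and the constants are hypothetical.
 */
#ifdef FFS_ALLOC_EXAMPLES
static int
example_size_ok(int size)
{
	const int bsize = 8192;		/* assumed fs_bsize */
	const int fsize = 1024;		/* assumed fs_fsize */

	/* equivalent of: size <= fs_bsize && fragoff(fs, size) == 0 */
	return (size > 0 && size <= bsize && size % fsize == 0);
	/* e.g. 1024, 4096 and 8192 pass; 3000 and 12288 do not */
}
#endif /* FFS_ALLOC_EXAMPLES */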

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(struct inode *ip, ufs_daddr_t lbn, int indx, ufs_daddr_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
	if (indx < fs->fs_maxcontig ||
	    ufs_rw32(bap[indx - fs->fs_maxcontig], UFS_FSNEEDSWAP(fs)) +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 *
 * `size': size for data blocks, mode for inodes
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(struct inode *ip, int cg, long pref, int size,
    ufs_daddr_t (*allocator)(struct inode *, int, ufs_daddr_t, int))
{
	struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
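
/*
 * Illustrative sketch (not part of the original file): the cylinder
 * group probe order used by ffs_hashalloc() above, extracted into a
 * standalone function.  Starting from group `icg' of `ncg' groups it
 * records icg, then icg+1, icg+3, icg+7, ... (mod ncg) for the
 * quadratic rehash, then the linear sweep starting at icg+2.  The
 * function name is hypothetical; `order' must have room for 2*ncg
 * entries.
 */
#ifdef FFS_ALLOC_EXAMPLES
static int
example_probe_order(int icg, int ncg, int *order)
{
	int i, cg, n = 0;

	order[n++] = icg;			/* 1: preferred group */
	cg = icg;
	for (i = 1; i < ncg; i *= 2) {		/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		order[n++] = cg;
	}
	cg = (icg + 2) % ncg;			/* 3: brute force sweep */
	for (i = 2; i < ncg; i++) {
		order[n++] = cg;
		if (++cg == ncg)
			cg = 0;
	}
	/* icg = 0, ncg = 8 yields: 0 1 3 7 2 3 4 5 6 7 */
	return (n);
}
#endif /* FFS_ALLOC_EXAMPLES */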

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(struct inode *ip, int cg, ufs_daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return (blkno);
}
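
/*
 * Illustrative sketch (not part of the original file): cg_frsum[s]
 * counts the free runs of exactly s fragments in a cylinder group.
 * When ffs_alloccg() above carves `frags' fragments out of a run of
 * `allocsiz', that run disappears and a run of allocsiz - frags is
 * left behind.  The helper below applies the same bookkeeping to a
 * plain array; the function name is hypothetical.
 */
#ifdef FFS_ALLOC_EXAMPLES
static void
example_frsum_alloc(int32_t *frsum, int allocsiz, int frags)
{
	frsum[allocsiz]--;		/* the old run is consumed */
	if (frags != allocsiz)
		frsum[allocsiz - frags]++;	/* remainder is a new run */
	/* e.g. taking 2 frags from a 5-frag run: frsum[5]--, frsum[3]++ */
}
#endif /* FFS_ALLOC_EXAMPLES */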

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, ufs_daddr_t bpref)
{
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	int i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	cgp = (struct cg *)bp->b_data;
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp, needswap),
	    fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp, needswap)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno, needswap);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (ufs_rw16(cylbp[i], needswap) > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (ufs_rw16(cylbp[i], needswap) > 0)
				break;
	if (ufs_rw16(cylbp[i], needswap) > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			errx(1,
			    "ffs_alloccgblk: cyl groups corrupted: pos %d i %d",
			    pos, i);
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp, needswap),
			    bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		errx(1, "ffs_alloccgblk: can't find blk in cyl: pos %d i %d",
		    pos, i);
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp, needswap), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	ufs_add16(cg_blks(fs, cgp, cylno, needswap)[cbtorpos(fs, bno)], -1,
	    needswap);
	ufs_add32(cg_blktot(cgp, needswap)[cylno], -1, needswap);
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
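
/*
 * Illustrative sketch (not part of the original file): ffs_alloccgblk()
 * above mixes two units, fragment numbers and block numbers, related by
 * fs_frag (fragments per block).  With an assumed fs_frag of 8, the
 * fragstoblks() and blkstofrags() macros reduce to the arithmetic
 * below; the example_ names and the constant are hypothetical.
 */
#ifdef FFS_ALLOC_EXAMPLES
#define	EXAMPLE_FRAG	8	/* assumed fragments per block */

static int
example_fragstoblks(int fragno)
{
	return (fragno / EXAMPLE_FRAG);	/* frag 40 lives in block 5 */
}

static int
example_blkstofrags(int blkno)
{
	return (blkno * EXAMPLE_FRAG);	/* block 5 starts at frag 40 */
}
#endif /* FFS_ALLOC_EXAMPLES */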

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, ufs_daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %u bsize %d size %ld",
		    bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		warnx("bad block %d, ino %d", bno, ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp);
		return;
	}
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), blkno)) {
			errx(1, "blkfree: freeing free block %d", bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs, bno)], 1,
		    needswap);
		ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap);
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), bno + i)) {
				errx(1, "blkfree: freeing free frag: block %d",
				    bno + i);
			}
			setbit(cg_blksfree(cgp, needswap), bno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), blkno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag,
			    needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs,
			    bbase)], 1, needswap);
			ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap);
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}


static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
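
/*
 * Illustrative sketch (not part of the original file): scanc() above
 * returns the number of bytes remaining from the first byte whose
 * table entry matches `mask' through the end of the region, or 0 if
 * none match, so callers recover the matching index as size - loc.
 * The toy table below simply flags nonzero bytes; the function name
 * and the data are hypothetical.
 */
#ifdef FFS_ALLOC_EXAMPLES
static int
example_scanc_usage(void)
{
	static const u_char map[4] = { 0x00, 0x00, 0x80, 0x01 };
	u_char table[256];
	int i, loc;

	for (i = 0; i < 256; i++)
		table[i] = (i != 0);	/* entry matches iff byte nonzero */
	loc = scanc(4, map, table, 1);
	/*
	 * loc == 2: two bytes (map[2], map[3]) remain, so the first
	 * nonzero byte sits at index 4 - loc == 2.  ffs_mapsearch()
	 * below uses the same trick: bno = (start + len - loc) * NBBY.
	 */
	return (4 - loc);
}
#endif /* FFS_ALLOC_EXAMPLES */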

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, ufs_daddr_t bpref, int allocsiz)
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %d", bno);
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs_daddr_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp, needswap);
	sump = cg_clustersum(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
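
/*
 * Illustrative sketch (not part of the original file): the sump[]
 * updates in ffs_clusteracct() above, restated for the allocation
 * case (cnt == -1).  Allocating a block that sits between a free run
 * of `back' blocks and one of `forw' blocks removes the merged
 * cluster of back + forw + 1 blocks (capped at maxcontig) and
 * re-creates the two smaller runs.  The helper name is hypothetical.
 */
#ifdef FFS_ALLOC_EXAMPLES
static void
example_clustersum_alloc(int32_t *sump, int back, int forw, int maxcontig)
{
	int i = back + forw + 1;	/* old merged cluster length */

	if (i > maxcontig)
		i = maxcontig;
	sump[i]--;			/* the merged cluster is gone */
	if (back > 0)
		sump[back]++;		/* run left before the block */
	if (forw > 0)
		sump[forw]++;		/* run left after the block */
	/* e.g. back = 2, forw = 3: sump[6]--, sump[2]++, sump[3]++ */
}
#endif /* FFS_ALLOC_EXAMPLES */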