1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95 34 * $FreeBSD: src/sys/ufs/ffs/ffs_alloc.c,v 1.64.2.2 2001/09/21 19:15:21 dillon Exp $ 35 * $DragonFly: src/sys/vfs/ufs/ffs_alloc.c,v 1.27 2006/12/29 17:10:20 swildner Exp $ 36 */ 37 38 #include "opt_quota.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/buf.h> 43 #include <sys/conf.h> 44 #include <sys/proc.h> 45 #include <sys/vnode.h> 46 #include <sys/mount.h> 47 #include <sys/kernel.h> 48 #include <sys/sysctl.h> 49 #include <sys/syslog.h> 50 51 #include <machine/inttypes.h> 52 53 #include "quota.h" 54 #include "inode.h" 55 #include "ufs_extern.h" 56 #include "ufsmount.h" 57 58 #include "fs.h" 59 #include "ffs_extern.h" 60 61 typedef ufs_daddr_t allocfcn_t (struct inode *ip, int cg, ufs_daddr_t bpref, 62 int size); 63 64 static ufs_daddr_t ffs_alloccg (struct inode *, int, ufs_daddr_t, int); 65 static ufs_daddr_t 66 ffs_alloccgblk (struct inode *, struct buf *, ufs_daddr_t); 67 #ifdef DIAGNOSTIC 68 static int ffs_checkblk (struct inode *, ufs_daddr_t, long); 69 #endif 70 static void ffs_clusteracct (struct fs *, struct cg *, ufs_daddr_t, 71 int); 72 static ufs_daddr_t ffs_clusteralloc (struct inode *, int, ufs_daddr_t, 73 int); 74 static ino_t ffs_dirpref (struct inode *); 75 static ufs_daddr_t ffs_fragextend (struct inode *, int, long, int, int); 76 static void ffs_fserr (struct fs *, uint, char *); 77 static u_long ffs_hashalloc 78 (struct inode *, int, long, int, allocfcn_t *); 79 static ino_t ffs_nodealloccg (struct inode *, int, ufs_daddr_t, int); 80 static ufs_daddr_t ffs_mapsearch (struct fs *, struct cg *, ufs_daddr_t, 81 int); 82 83 /* 84 * Allocate a block in the filesystem. 85 * 86 * The size of the requested block is given, which must be some 87 * multiple of fs_fsize and <= fs_bsize. 88 * A preference may be optionally specified. If a preference is given 89 * the following hierarchy is used to allocate a block: 90 * 1) allocate the requested block. 91 * 2) allocate a rotationally optimal block in the same cylinder. 92 * 3) allocate a block in the same cylinder group. 93 * 4) quadradically rehash into other cylinder groups, until an 94 * available block is located. 95 * If no block preference is given the following heirarchy is used 96 * to allocate a block: 97 * 1) allocate a block in the cylinder group that contains the 98 * inode for the file. 99 * 2) quadradically rehash into other cylinder groups, until an 100 * available block is located. 101 */ 102 int 103 ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size, 104 struct ucred *cred, ufs_daddr_t *bnp) 105 { 106 struct fs *fs; 107 ufs_daddr_t bno; 108 int cg; 109 #ifdef QUOTA 110 int error; 111 #endif 112 113 *bnp = 0; 114 fs = ip->i_fs; 115 #ifdef DIAGNOSTIC 116 if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) { 117 kprintf("dev = %s, bsize = %ld, size = %d, fs = %s\n", 118 devtoname(ip->i_dev), (long)fs->fs_bsize, size, 119 fs->fs_fsmnt); 120 panic("ffs_alloc: bad size"); 121 } 122 if (cred == NOCRED) 123 panic("ffs_alloc: missing credential"); 124 #endif /* DIAGNOSTIC */ 125 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 126 goto nospace; 127 if (cred->cr_uid != 0 && 128 freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0) 129 goto nospace; 130 #ifdef QUOTA 131 error = ufs_chkdq(ip, (long)btodb(size), cred, 0); 132 if (error) 133 return (error); 134 #endif 135 if (bpref >= fs->fs_size) 136 bpref = 0; 137 if (bpref == 0) 138 cg = ino_to_cg(fs, ip->i_number); 139 else 140 cg = dtog(fs, bpref); 141 bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size, 142 ffs_alloccg); 143 if (bno > 0) { 144 ip->i_blocks += btodb(size); 145 ip->i_flag |= IN_CHANGE | IN_UPDATE; 146 *bnp = bno; 147 return (0); 148 } 149 #ifdef QUOTA 150 /* 151 * Restore user's disk quota because allocation failed. 152 */ 153 (void) ufs_chkdq(ip, (long)-btodb(size), cred, FORCE); 154 #endif 155 nospace: 156 ffs_fserr(fs, cred->cr_uid, "filesystem full"); 157 uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt); 158 return (ENOSPC); 159 } 160 161 /* 162 * Reallocate a fragment to a bigger size 163 * 164 * The number and size of the old block is given, and a preference 165 * and new size is also specified. The allocator attempts to extend 166 * the original block. Failing that, the regular block allocator is 167 * invoked to get an appropriate block. 168 */ 169 int 170 ffs_realloccg(struct inode *ip, ufs_daddr_t lbprev, ufs_daddr_t bpref, 171 int osize, int nsize, struct ucred *cred, struct buf **bpp) 172 { 173 struct fs *fs; 174 struct buf *bp; 175 int cg, request, error; 176 ufs_daddr_t bprev, bno; 177 178 *bpp = 0; 179 fs = ip->i_fs; 180 #ifdef DIAGNOSTIC 181 if ((uint)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || 182 (uint)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) { 183 kprintf( 184 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n", 185 devtoname(ip->i_dev), (long)fs->fs_bsize, osize, 186 nsize, fs->fs_fsmnt); 187 panic("ffs_realloccg: bad size"); 188 } 189 if (cred == NOCRED) 190 panic("ffs_realloccg: missing credential"); 191 #endif /* DIAGNOSTIC */ 192 if (cred->cr_uid != 0 && 193 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) 194 goto nospace; 195 if ((bprev = ip->i_db[lbprev]) == 0) { 196 kprintf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n", 197 devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev, 198 fs->fs_fsmnt); 199 panic("ffs_realloccg: bad bprev"); 200 } 201 /* 202 * Allocate the extra space in the buffer. 203 */ 204 error = bread(ITOV(ip), lblktodoff(fs, lbprev), osize, &bp); 205 if (error) { 206 brelse(bp); 207 return (error); 208 } 209 210 if(bp->b_bio2.bio_offset == NOOFFSET) { 211 if( lbprev >= NDADDR) 212 panic("ffs_realloccg: lbprev out of range"); 213 bp->b_bio2.bio_offset = fsbtodoff(fs, bprev); 214 } 215 216 #ifdef QUOTA 217 error = ufs_chkdq(ip, (long)btodb(nsize - osize), cred, 0); 218 if (error) { 219 brelse(bp); 220 return (error); 221 } 222 #endif 223 /* 224 * Check for extension in the existing location. 225 */ 226 cg = dtog(fs, bprev); 227 bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize); 228 if (bno) { 229 if (bp->b_bio2.bio_offset != fsbtodoff(fs, bno)) 230 panic("ffs_realloccg: bad blockno"); 231 ip->i_blocks += btodb(nsize - osize); 232 ip->i_flag |= IN_CHANGE | IN_UPDATE; 233 allocbuf(bp, nsize); 234 bzero((char *)bp->b_data + osize, (uint)nsize - osize); 235 *bpp = bp; 236 return (0); 237 } 238 /* 239 * Allocate a new disk location. 240 */ 241 if (bpref >= fs->fs_size) 242 bpref = 0; 243 switch ((int)fs->fs_optim) { 244 case FS_OPTSPACE: 245 /* 246 * Allocate an exact sized fragment. Although this makes 247 * best use of space, we will waste time relocating it if 248 * the file continues to grow. If the fragmentation is 249 * less than half of the minimum free reserve, we choose 250 * to begin optimizing for time. 251 */ 252 request = nsize; 253 if (fs->fs_minfree <= 5 || 254 fs->fs_cstotal.cs_nffree > 255 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100)) 256 break; 257 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n", 258 fs->fs_fsmnt); 259 fs->fs_optim = FS_OPTTIME; 260 break; 261 case FS_OPTTIME: 262 /* 263 * At this point we have discovered a file that is trying to 264 * grow a small fragment to a larger fragment. To save time, 265 * we allocate a full sized block, then free the unused portion. 266 * If the file continues to grow, the `ffs_fragextend' call 267 * above will be able to grow it in place without further 268 * copying. If aberrant programs cause disk fragmentation to 269 * grow within 2% of the free reserve, we choose to begin 270 * optimizing for space. 271 */ 272 request = fs->fs_bsize; 273 if (fs->fs_cstotal.cs_nffree < 274 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100) 275 break; 276 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n", 277 fs->fs_fsmnt); 278 fs->fs_optim = FS_OPTSPACE; 279 break; 280 default: 281 kprintf("dev = %s, optim = %ld, fs = %s\n", 282 devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt); 283 panic("ffs_realloccg: bad optim"); 284 /* NOTREACHED */ 285 } 286 bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request, 287 ffs_alloccg); 288 if (bno > 0) { 289 bp->b_bio2.bio_offset = fsbtodoff(fs, bno); 290 if (!DOINGSOFTDEP(ITOV(ip))) 291 ffs_blkfree(ip, bprev, (long)osize); 292 if (nsize < request) 293 ffs_blkfree(ip, bno + numfrags(fs, nsize), 294 (long)(request - nsize)); 295 ip->i_blocks += btodb(nsize - osize); 296 ip->i_flag |= IN_CHANGE | IN_UPDATE; 297 allocbuf(bp, nsize); 298 bzero((char *)bp->b_data + osize, (uint)nsize - osize); 299 *bpp = bp; 300 return (0); 301 } 302 #ifdef QUOTA 303 /* 304 * Restore user's disk quota because allocation failed. 305 */ 306 (void) ufs_chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE); 307 #endif 308 brelse(bp); 309 nospace: 310 /* 311 * no space available 312 */ 313 ffs_fserr(fs, cred->cr_uid, "filesystem full"); 314 uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt); 315 return (ENOSPC); 316 } 317 318 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem"); 319 320 /* 321 * Reallocate a sequence of blocks into a contiguous sequence of blocks. 322 * 323 * The vnode and an array of buffer pointers for a range of sequential 324 * logical blocks to be made contiguous is given. The allocator attempts 325 * to find a range of sequential blocks starting as close as possible to 326 * an fs_rotdelay offset from the end of the allocation for the logical 327 * block immediately preceeding the current range. If successful, the 328 * physical block numbers in the buffer pointers and in the inode are 329 * changed to reflect the new allocation. If unsuccessful, the allocation 330 * is left unchanged. The success in doing the reallocation is returned. 331 * Note that the error return is not reflected back to the user. Rather 332 * the previous block allocation will be used. 333 */ 334 static int doasyncfree = 1; 335 SYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, ""); 336 337 static int doreallocblks = 1; 338 SYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); 339 340 #ifdef DEBUG 341 static volatile int prtrealloc = 0; 342 #endif 343 344 /* 345 * ffs_reallocblks(struct vnode *a_vp, struct cluster_save *a_buflist) 346 */ 347 int 348 ffs_reallocblks(struct vop_reallocblks_args *ap) 349 { 350 struct fs *fs; 351 struct inode *ip; 352 struct vnode *vp; 353 struct buf *sbp, *ebp; 354 ufs_daddr_t *bap, *sbap, *ebap = 0; 355 struct cluster_save *buflist; 356 ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno; 357 #ifdef DIAGNOSTIC 358 off_t boffset; 359 #endif 360 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp; 361 int i, len, slen, start_lvl, end_lvl, pref, ssize; 362 363 if (doreallocblks == 0) 364 return (ENOSPC); 365 vp = ap->a_vp; 366 ip = VTOI(vp); 367 fs = ip->i_fs; 368 if (fs->fs_contigsumsize <= 0) 369 return (ENOSPC); 370 buflist = ap->a_buflist; 371 len = buflist->bs_nchildren; 372 start_lbn = lblkno(fs, buflist->bs_children[0]->b_loffset); 373 end_lbn = start_lbn + len - 1; 374 #ifdef DIAGNOSTIC 375 for (i = 0; i < len; i++) 376 if (!ffs_checkblk(ip, 377 dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize)) 378 panic("ffs_reallocblks: unallocated block 1"); 379 for (i = 1; i < len; i++) { 380 if (buflist->bs_children[i]->b_loffset != lblktodoff(fs, start_lbn) + lblktodoff(fs, i)) 381 panic("ffs_reallocblks: non-logical cluster"); 382 } 383 boffset = buflist->bs_children[0]->b_bio2.bio_offset; 384 ssize = (int)fsbtodoff(fs, fs->fs_frag); 385 for (i = 1; i < len - 1; i++) 386 if (buflist->bs_children[i]->b_bio2.bio_offset != boffset + (i * ssize)) 387 panic("ffs_reallocblks: non-physical cluster %d", i); 388 #endif 389 /* 390 * If the latest allocation is in a new cylinder group, assume that 391 * the filesystem has decided to move and do not force it back to 392 * the previous cylinder group. 393 */ 394 if (dtog(fs, dofftofsb(fs, buflist->bs_children[0]->b_bio2.bio_offset)) != 395 dtog(fs, dofftofsb(fs, buflist->bs_children[len - 1]->b_bio2.bio_offset))) 396 return (ENOSPC); 397 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) || 398 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl)) 399 return (ENOSPC); 400 /* 401 * Get the starting offset and block map for the first block and 402 * the number of blocks that will fit into sbap starting at soff. 403 */ 404 if (start_lvl == 0) { 405 sbap = &ip->i_db[0]; 406 soff = start_lbn; 407 slen = NDADDR - soff; 408 } else { 409 idp = &start_ap[start_lvl - 1]; 410 if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->fs_bsize, &sbp)) { 411 brelse(sbp); 412 return (ENOSPC); 413 } 414 sbap = (ufs_daddr_t *)sbp->b_data; 415 soff = idp->in_off; 416 slen = fs->fs_nindir - soff; 417 } 418 /* 419 * Find the preferred location for the cluster. 420 */ 421 pref = ffs_blkpref(ip, start_lbn, soff, sbap); 422 /* 423 * If the block range spans two block maps, get the second map. 424 */ 425 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 426 ssize = len; 427 } else { 428 #ifdef DIAGNOSTIC 429 if (start_ap[start_lvl-1].in_lbn == idp->in_lbn) 430 panic("ffs_reallocblk: start == end"); 431 #endif 432 ssize = len - (idp->in_off + 1); 433 if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->fs_bsize, &ebp)) 434 goto fail; 435 ebap = (ufs_daddr_t *)ebp->b_data; 436 } 437 438 /* 439 * Make sure we aren't spanning more then two blockmaps. ssize is 440 * our calculation of the span we have to scan in the first blockmap, 441 * while slen is our calculation of the number of entries available 442 * in the first blockmap (from soff). 443 */ 444 if (ssize > slen) { 445 panic("ffs_reallocblks: range spans more then two blockmaps!" 446 " start_lbn %ld len %d (%d/%d)", 447 (long)start_lbn, len, slen, ssize); 448 } 449 /* 450 * Search the block map looking for an allocation of the desired size. 451 */ 452 if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref, 453 len, ffs_clusteralloc)) == 0) 454 goto fail; 455 /* 456 * We have found a new contiguous block. 457 * 458 * First we have to replace the old block pointers with the new 459 * block pointers in the inode and indirect blocks associated 460 * with the file. 461 */ 462 #ifdef DEBUG 463 if (prtrealloc) 464 kprintf("realloc: ino %ju, lbns %d-%d\n\told:", 465 (uintmax_t)ip->i_number, start_lbn, end_lbn); 466 #endif 467 blkno = newblk; 468 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) { 469 if (i == ssize) { 470 bap = ebap; 471 soff = -i; 472 } 473 #ifdef DIAGNOSTIC 474 if (!ffs_checkblk(ip, 475 dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize)) 476 panic("ffs_reallocblks: unallocated block 2"); 477 if (dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset) != *bap) 478 panic("ffs_reallocblks: alloc mismatch"); 479 #endif 480 #ifdef DEBUG 481 if (prtrealloc) 482 kprintf(" %d,", *bap); 483 #endif 484 if (DOINGSOFTDEP(vp)) { 485 if (sbap == &ip->i_db[0] && i < ssize) 486 softdep_setup_allocdirect(ip, start_lbn + i, 487 blkno, *bap, fs->fs_bsize, fs->fs_bsize, 488 buflist->bs_children[i]); 489 else 490 softdep_setup_allocindir_page(ip, start_lbn + i, 491 i < ssize ? sbp : ebp, soff + i, blkno, 492 *bap, buflist->bs_children[i]); 493 } 494 *bap++ = blkno; 495 } 496 /* 497 * Next we must write out the modified inode and indirect blocks. 498 * For strict correctness, the writes should be synchronous since 499 * the old block values may have been written to disk. In practise 500 * they are almost never written, but if we are concerned about 501 * strict correctness, the `doasyncfree' flag should be set to zero. 502 * 503 * The test on `doasyncfree' should be changed to test a flag 504 * that shows whether the associated buffers and inodes have 505 * been written. The flag should be set when the cluster is 506 * started and cleared whenever the buffer or inode is flushed. 507 * We can then check below to see if it is set, and do the 508 * synchronous write only when it has been cleared. 509 */ 510 if (sbap != &ip->i_db[0]) { 511 if (doasyncfree) 512 bdwrite(sbp); 513 else 514 bwrite(sbp); 515 } else { 516 ip->i_flag |= IN_CHANGE | IN_UPDATE; 517 if (!doasyncfree) 518 ffs_update(vp, 1); 519 } 520 if (ssize < len) { 521 if (doasyncfree) 522 bdwrite(ebp); 523 else 524 bwrite(ebp); 525 } 526 /* 527 * Last, free the old blocks and assign the new blocks to the buffers. 528 */ 529 #ifdef DEBUG 530 if (prtrealloc) 531 kprintf("\n\tnew:"); 532 #endif 533 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) { 534 if (!DOINGSOFTDEP(vp)) 535 ffs_blkfree(ip, 536 dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), 537 fs->fs_bsize); 538 buflist->bs_children[i]->b_bio2.bio_offset = fsbtodoff(fs, blkno); 539 #ifdef DIAGNOSTIC 540 if (!ffs_checkblk(ip, 541 dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize)) 542 panic("ffs_reallocblks: unallocated block 3"); 543 #endif 544 #ifdef DEBUG 545 if (prtrealloc) 546 kprintf(" %d,", blkno); 547 #endif 548 } 549 #ifdef DEBUG 550 if (prtrealloc) { 551 prtrealloc--; 552 kprintf("\n"); 553 } 554 #endif 555 return (0); 556 557 fail: 558 if (ssize < len) 559 brelse(ebp); 560 if (sbap != &ip->i_db[0]) 561 brelse(sbp); 562 return (ENOSPC); 563 } 564 565 /* 566 * Allocate an inode in the filesystem. 567 * 568 * If allocating a directory, use ffs_dirpref to select the inode. 569 * If allocating in a directory, the following hierarchy is followed: 570 * 1) allocate the preferred inode. 571 * 2) allocate an inode in the same cylinder group. 572 * 3) quadradically rehash into other cylinder groups, until an 573 * available inode is located. 574 * If no inode preference is given the following heirarchy is used 575 * to allocate an inode: 576 * 1) allocate an inode in cylinder group 0. 577 * 2) quadradically rehash into other cylinder groups, until an 578 * available inode is located. 579 */ 580 int 581 ffs_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) 582 { 583 struct inode *pip; 584 struct fs *fs; 585 struct inode *ip; 586 ino_t ino, ipref; 587 int cg, error; 588 589 *vpp = NULL; 590 pip = VTOI(pvp); 591 fs = pip->i_fs; 592 if (fs->fs_cstotal.cs_nifree == 0) 593 goto noinodes; 594 595 if ((mode & IFMT) == IFDIR) 596 ipref = ffs_dirpref(pip); 597 else 598 ipref = pip->i_number; 599 if (ipref >= fs->fs_ncg * fs->fs_ipg) 600 ipref = 0; 601 cg = ino_to_cg(fs, ipref); 602 /* 603 * Track number of dirs created one after another 604 * in a same cg without intervening by files. 605 */ 606 if ((mode & IFMT) == IFDIR) { 607 if (fs->fs_contigdirs[cg] < 255) 608 fs->fs_contigdirs[cg]++; 609 } else { 610 if (fs->fs_contigdirs[cg] > 0) 611 fs->fs_contigdirs[cg]--; 612 } 613 ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, 614 (allocfcn_t *)ffs_nodealloccg); 615 if (ino == 0) 616 goto noinodes; 617 error = VFS_VGET(pvp->v_mount, ino, vpp); 618 if (error) { 619 ffs_vfree(pvp, ino, mode); 620 return (error); 621 } 622 ip = VTOI(*vpp); 623 if (ip->i_mode) { 624 kprintf("mode = 0%o, inum = %lu, fs = %s\n", 625 ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt); 626 panic("ffs_valloc: dup alloc"); 627 } 628 if (ip->i_blocks) { /* XXX */ 629 kprintf("free inode %s/%lu had %ld blocks\n", 630 fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks); 631 ip->i_blocks = 0; 632 } 633 ip->i_flags = 0; 634 /* 635 * Set up a new generation number for this inode. 636 */ 637 if (ip->i_gen == 0 || ++ip->i_gen == 0) 638 ip->i_gen = krandom() / 2 + 1; 639 return (0); 640 noinodes: 641 ffs_fserr(fs, cred->cr_uid, "out of inodes"); 642 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt); 643 return (ENOSPC); 644 } 645 646 /* 647 * Find a cylinder group to place a directory. 648 * 649 * The policy implemented by this algorithm is to allocate a 650 * directory inode in the same cylinder group as its parent 651 * directory, but also to reserve space for its files inodes 652 * and data. Restrict the number of directories which may be 653 * allocated one after another in the same cylinder group 654 * without intervening allocation of files. 655 * 656 * If we allocate a first level directory then force allocation 657 * in another cylinder group. 658 */ 659 static ino_t 660 ffs_dirpref(struct inode *pip) 661 { 662 struct fs *fs; 663 int cg, prefcg, dirsize, cgsize; 664 int64_t dirsize64; 665 int avgifree, avgbfree, avgndir, curdirsize; 666 int minifree, minbfree, maxndir; 667 int mincg, minndir; 668 int maxcontigdirs; 669 670 fs = pip->i_fs; 671 672 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 673 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 674 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg; 675 676 /* 677 * Force allocation in another cg if creating a first level dir. 678 */ 679 if (ITOV(pip)->v_flag & VROOT) { 680 prefcg = karc4random() % fs->fs_ncg; 681 mincg = prefcg; 682 minndir = fs->fs_ipg; 683 for (cg = prefcg; cg < fs->fs_ncg; cg++) 684 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 685 fs->fs_cs(fs, cg).cs_nifree >= avgifree && 686 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 687 mincg = cg; 688 minndir = fs->fs_cs(fs, cg).cs_ndir; 689 } 690 for (cg = 0; cg < prefcg; cg++) 691 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 692 fs->fs_cs(fs, cg).cs_nifree >= avgifree && 693 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 694 mincg = cg; 695 minndir = fs->fs_cs(fs, cg).cs_ndir; 696 } 697 return ((ino_t)(fs->fs_ipg * mincg)); 698 } 699 700 /* 701 * Count various limits which used for 702 * optimal allocation of a directory inode. 703 */ 704 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg); 705 minifree = avgifree - avgifree / 4; 706 if (minifree < 1) 707 minifree = 1; 708 minbfree = avgbfree - avgbfree / 4; 709 if (minbfree < 1) 710 minbfree = 1; 711 cgsize = fs->fs_fsize * fs->fs_fpg; 712 713 /* 714 * fs_avgfilesize and fs_avgfpdir are user-settable entities and 715 * multiplying them may overflow a 32 bit integer. 716 */ 717 dirsize64 = fs->fs_avgfilesize * (int64_t)fs->fs_avgfpdir; 718 if (dirsize64 > 0x7fffffff) { 719 maxcontigdirs = 1; 720 } else { 721 dirsize = (int)dirsize64; 722 curdirsize = avgndir ? 723 (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0; 724 if (dirsize < curdirsize) 725 dirsize = curdirsize; 726 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255); 727 if (fs->fs_avgfpdir > 0) 728 maxcontigdirs = min(maxcontigdirs, 729 fs->fs_ipg / fs->fs_avgfpdir); 730 if (maxcontigdirs == 0) 731 maxcontigdirs = 1; 732 } 733 734 /* 735 * Limit number of dirs in one cg and reserve space for 736 * regular files, but only if we have no deficit in 737 * inodes or space. 738 */ 739 prefcg = ino_to_cg(fs, pip->i_number); 740 for (cg = prefcg; cg < fs->fs_ncg; cg++) 741 if (fs->fs_cs(fs, cg).cs_ndir < maxndir && 742 fs->fs_cs(fs, cg).cs_nifree >= minifree && 743 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { 744 if (fs->fs_contigdirs[cg] < maxcontigdirs) 745 return ((ino_t)(fs->fs_ipg * cg)); 746 } 747 for (cg = 0; cg < prefcg; cg++) 748 if (fs->fs_cs(fs, cg).cs_ndir < maxndir && 749 fs->fs_cs(fs, cg).cs_nifree >= minifree && 750 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) { 751 if (fs->fs_contigdirs[cg] < maxcontigdirs) 752 return ((ino_t)(fs->fs_ipg * cg)); 753 } 754 /* 755 * This is a backstop when we have deficit in space. 756 */ 757 for (cg = prefcg; cg < fs->fs_ncg; cg++) 758 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) 759 return ((ino_t)(fs->fs_ipg * cg)); 760 for (cg = 0; cg < prefcg; cg++) 761 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree) 762 break; 763 return ((ino_t)(fs->fs_ipg * cg)); 764 } 765 766 /* 767 * Select the desired position for the next block in a file. The file is 768 * logically divided into sections. The first section is composed of the 769 * direct blocks. Each additional section contains fs_maxbpg blocks. 770 * 771 * If no blocks have been allocated in the first section, the policy is to 772 * request a block in the same cylinder group as the inode that describes 773 * the file. If no blocks have been allocated in any other section, the 774 * policy is to place the section in a cylinder group with a greater than 775 * average number of free blocks. An appropriate cylinder group is found 776 * by using a rotor that sweeps the cylinder groups. When a new group of 777 * blocks is needed, the sweep begins in the cylinder group following the 778 * cylinder group from which the previous allocation was made. The sweep 779 * continues until a cylinder group with greater than the average number 780 * of free blocks is found. If the allocation is for the first block in an 781 * indirect block, the information on the previous allocation is unavailable; 782 * here a best guess is made based upon the logical block number being 783 * allocated. 784 * 785 * If a section is already partially allocated, the policy is to 786 * contiguously allocate fs_maxcontig blocks. The end of one of these 787 * contiguous blocks and the beginning of the next is physically separated 788 * so that the disk head will be in transit between them for at least 789 * fs_rotdelay milliseconds. This is to allow time for the processor to 790 * schedule another I/O transfer. 791 */ 792 ufs_daddr_t 793 ffs_blkpref(struct inode *ip, ufs_daddr_t lbn, int indx, ufs_daddr_t *bap) 794 { 795 struct fs *fs; 796 int cg; 797 int avgbfree, startcg; 798 ufs_daddr_t nextblk; 799 800 fs = ip->i_fs; 801 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 802 if (lbn < NDADDR + NINDIR(fs)) { 803 cg = ino_to_cg(fs, ip->i_number); 804 return (fs->fs_fpg * cg + fs->fs_frag); 805 } 806 /* 807 * Find a cylinder with greater than average number of 808 * unused data blocks. 809 */ 810 if (indx == 0 || bap[indx - 1] == 0) 811 startcg = 812 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg; 813 else 814 startcg = dtog(fs, bap[indx - 1]) + 1; 815 startcg %= fs->fs_ncg; 816 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 817 for (cg = startcg; cg < fs->fs_ncg; cg++) 818 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 819 fs->fs_cgrotor = cg; 820 return (fs->fs_fpg * cg + fs->fs_frag); 821 } 822 for (cg = 0; cg <= startcg; cg++) 823 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 824 fs->fs_cgrotor = cg; 825 return (fs->fs_fpg * cg + fs->fs_frag); 826 } 827 return (0); 828 } 829 /* 830 * One or more previous blocks have been laid out. If less 831 * than fs_maxcontig previous blocks are contiguous, the 832 * next block is requested contiguously, otherwise it is 833 * requested rotationally delayed by fs_rotdelay milliseconds. 834 */ 835 nextblk = bap[indx - 1] + fs->fs_frag; 836 if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig || 837 bap[indx - fs->fs_maxcontig] + 838 blkstofrags(fs, fs->fs_maxcontig) != nextblk) 839 return (nextblk); 840 /* 841 * Here we convert ms of delay to frags as: 842 * (frags) = (ms) * (rev/sec) * (sect/rev) / 843 * ((sect/frag) * (ms/sec)) 844 * then round up to the next block. 845 */ 846 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / 847 (NSPF(fs) * 1000), fs->fs_frag); 848 return (nextblk); 849 } 850 851 /* 852 * Implement the cylinder overflow algorithm. 853 * 854 * The policy implemented by this algorithm is: 855 * 1) allocate the block in its requested cylinder group. 856 * 2) quadradically rehash on the cylinder group number. 857 * 3) brute force search for a free block. 858 */ 859 /*VARARGS5*/ 860 static u_long 861 ffs_hashalloc(struct inode *ip, int cg, long pref, 862 int size, /* size for data blocks, mode for inodes */ 863 allocfcn_t *allocator) 864 { 865 struct fs *fs; 866 long result; /* XXX why not same type as we return? */ 867 int i, icg = cg; 868 869 fs = ip->i_fs; 870 /* 871 * 1: preferred cylinder group 872 */ 873 result = (*allocator)(ip, cg, pref, size); 874 if (result) 875 return (result); 876 /* 877 * 2: quadratic rehash 878 */ 879 for (i = 1; i < fs->fs_ncg; i *= 2) { 880 cg += i; 881 if (cg >= fs->fs_ncg) 882 cg -= fs->fs_ncg; 883 result = (*allocator)(ip, cg, 0, size); 884 if (result) 885 return (result); 886 } 887 /* 888 * 3: brute force search 889 * Note that we start at i == 2, since 0 was checked initially, 890 * and 1 is always checked in the quadratic rehash. 891 */ 892 cg = (icg + 2) % fs->fs_ncg; 893 for (i = 2; i < fs->fs_ncg; i++) { 894 result = (*allocator)(ip, cg, 0, size); 895 if (result) 896 return (result); 897 cg++; 898 if (cg == fs->fs_ncg) 899 cg = 0; 900 } 901 return (0); 902 } 903 904 /* 905 * Determine whether a fragment can be extended. 906 * 907 * Check to see if the necessary fragments are available, and 908 * if they are, allocate them. 909 */ 910 static ufs_daddr_t 911 ffs_fragextend(struct inode *ip, int cg, long bprev, int osize, int nsize) 912 { 913 struct fs *fs; 914 struct cg *cgp; 915 struct buf *bp; 916 long bno; 917 int frags, bbase; 918 int i, error; 919 uint8_t *blksfree; 920 921 fs = ip->i_fs; 922 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize)) 923 return (0); 924 frags = numfrags(fs, nsize); 925 bbase = fragnum(fs, bprev); 926 if (bbase > fragnum(fs, (bprev + frags - 1))) { 927 /* cannot extend across a block boundary */ 928 return (0); 929 } 930 KKASSERT(blknum(fs, bprev) == blknum(fs, bprev + frags - 1)); 931 error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 932 (int)fs->fs_cgsize, &bp); 933 if (error) { 934 brelse(bp); 935 return (0); 936 } 937 cgp = (struct cg *)bp->b_data; 938 if (!cg_chkmagic(cgp)) { 939 brelse(bp); 940 return (0); 941 } 942 cgp->cg_time = time_second; 943 bno = dtogd(fs, bprev); 944 blksfree = cg_blksfree(cgp); 945 for (i = numfrags(fs, osize); i < frags; i++) { 946 if (isclr(blksfree, bno + i)) { 947 brelse(bp); 948 return (0); 949 } 950 } 951 952 /* 953 * the current fragment can be extended 954 * deduct the count on fragment being extended into 955 * increase the count on the remaining fragment (if any) 956 * allocate the extended piece 957 * 958 * ---oooooooooonnnnnnn111---- 959 * [-----frags-----] 960 * ^ ^ 961 * bbase fs_frag 962 */ 963 for (i = frags; i < fs->fs_frag - bbase; i++) { 964 if (isclr(blksfree, bno + i)) 965 break; 966 } 967 968 /* 969 * Size of original free frag is [i - numfrags(fs, osize)] 970 * Size of remaining free frag is [i - frags] 971 */ 972 cgp->cg_frsum[i - numfrags(fs, osize)]--; 973 if (i != frags) 974 cgp->cg_frsum[i - frags]++; 975 for (i = numfrags(fs, osize); i < frags; i++) { 976 clrbit(blksfree, bno + i); 977 cgp->cg_cs.cs_nffree--; 978 fs->fs_cstotal.cs_nffree--; 979 fs->fs_cs(fs, cg).cs_nffree--; 980 } 981 fs->fs_fmod = 1; 982 if (DOINGSOFTDEP(ITOV(ip))) 983 softdep_setup_blkmapdep(bp, fs, bprev); 984 bdwrite(bp); 985 return (bprev); 986 } 987 988 /* 989 * Determine whether a block can be allocated. 990 * 991 * Check to see if a block of the appropriate size is available, 992 * and if it is, allocate it. 993 */ 994 static ufs_daddr_t 995 ffs_alloccg(struct inode *ip, int cg, ufs_daddr_t bpref, int size) 996 { 997 struct fs *fs; 998 struct cg *cgp; 999 struct buf *bp; 1000 int i; 1001 ufs_daddr_t bno, blkno; 1002 int allocsiz, error, frags; 1003 uint8_t *blksfree; 1004 1005 fs = ip->i_fs; 1006 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 1007 return (0); 1008 error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 1009 (int)fs->fs_cgsize, &bp); 1010 if (error) { 1011 brelse(bp); 1012 return (0); 1013 } 1014 cgp = (struct cg *)bp->b_data; 1015 if (!cg_chkmagic(cgp) || 1016 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) { 1017 brelse(bp); 1018 return (0); 1019 } 1020 cgp->cg_time = time_second; 1021 if (size == fs->fs_bsize) { 1022 bno = ffs_alloccgblk(ip, bp, bpref); 1023 bdwrite(bp); 1024 return (bno); 1025 } 1026 /* 1027 * Check to see if any fragments of sufficient size are already 1028 * available. Fit the data into a larger fragment if necessary, 1029 * before allocating a whole new block. 1030 */ 1031 blksfree = cg_blksfree(cgp); 1032 frags = numfrags(fs, size); 1033 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) { 1034 if (cgp->cg_frsum[allocsiz] != 0) 1035 break; 1036 } 1037 if (allocsiz == fs->fs_frag) { 1038 /* 1039 * No fragments were available, allocate a whole block and 1040 * cut the requested fragment (of size frags) out of it. 1041 */ 1042 if (cgp->cg_cs.cs_nbfree == 0) { 1043 brelse(bp); 1044 return (0); 1045 } 1046 bno = ffs_alloccgblk(ip, bp, bpref); 1047 bpref = dtogd(fs, bno); 1048 for (i = frags; i < fs->fs_frag; i++) 1049 setbit(blksfree, bpref + i); 1050 1051 /* 1052 * Calculate the number of free frags still remaining after 1053 * we have cut out the requested allocation. Indicate that 1054 * a fragment of that size is now available for future 1055 * allocation. 1056 */ 1057 i = fs->fs_frag - frags; 1058 cgp->cg_cs.cs_nffree += i; 1059 fs->fs_cstotal.cs_nffree += i; 1060 fs->fs_cs(fs, cg).cs_nffree += i; 1061 fs->fs_fmod = 1; 1062 cgp->cg_frsum[i]++; 1063 bdwrite(bp); 1064 return (bno); 1065 } 1066 1067 /* 1068 * cg_frsum[] has told us that a free fragment of allocsiz size is 1069 * available. Find it, then clear the bitmap bits associated with 1070 * the size we want. 1071 */ 1072 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz); 1073 if (bno < 0) { 1074 brelse(bp); 1075 return (0); 1076 } 1077 for (i = 0; i < frags; i++) 1078 clrbit(blksfree, bno + i); 1079 cgp->cg_cs.cs_nffree -= frags; 1080 fs->fs_cstotal.cs_nffree -= frags; 1081 fs->fs_cs(fs, cg).cs_nffree -= frags; 1082 fs->fs_fmod = 1; 1083 1084 /* 1085 * Account for the allocation. The original searched size that we 1086 * found is no longer available. If we cut out a smaller piece then 1087 * a smaller fragment is now available. 1088 */ 1089 cgp->cg_frsum[allocsiz]--; 1090 if (frags != allocsiz) 1091 cgp->cg_frsum[allocsiz - frags]++; 1092 blkno = cg * fs->fs_fpg + bno; 1093 if (DOINGSOFTDEP(ITOV(ip))) 1094 softdep_setup_blkmapdep(bp, fs, blkno); 1095 bdwrite(bp); 1096 return ((u_long)blkno); 1097 } 1098 1099 /* 1100 * Allocate a block in a cylinder group. 1101 * 1102 * This algorithm implements the following policy: 1103 * 1) allocate the requested block. 1104 * 2) allocate a rotationally optimal block in the same cylinder. 1105 * 3) allocate the next available block on the block rotor for the 1106 * specified cylinder group. 1107 * Note that this routine only allocates fs_bsize blocks; these 1108 * blocks may be fragmented by the routine that allocates them. 1109 */ 1110 static ufs_daddr_t 1111 ffs_alloccgblk(struct inode *ip, struct buf *bp, ufs_daddr_t bpref) 1112 { 1113 struct fs *fs; 1114 struct cg *cgp; 1115 ufs_daddr_t bno, blkno; 1116 int cylno, pos, delta; 1117 short *cylbp; 1118 int i; 1119 uint8_t *blksfree; 1120 1121 fs = ip->i_fs; 1122 cgp = (struct cg *)bp->b_data; 1123 blksfree = cg_blksfree(cgp); 1124 if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) { 1125 bpref = cgp->cg_rotor; 1126 goto norot; 1127 } 1128 bpref = blknum(fs, bpref); 1129 bpref = dtogd(fs, bpref); 1130 /* 1131 * if the requested block is available, use it 1132 */ 1133 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) { 1134 bno = bpref; 1135 goto gotit; 1136 } 1137 if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) { 1138 /* 1139 * Block layout information is not available. 1140 * Leaving bpref unchanged means we take the 1141 * next available free block following the one 1142 * we just allocated. Hopefully this will at 1143 * least hit a track cache on drives of unknown 1144 * geometry (e.g. SCSI). 1145 */ 1146 goto norot; 1147 } 1148 /* 1149 * check for a block available on the same cylinder 1150 */ 1151 cylno = cbtocylno(fs, bpref); 1152 if (cg_blktot(cgp)[cylno] == 0) 1153 goto norot; 1154 /* 1155 * check the summary information to see if a block is 1156 * available in the requested cylinder starting at the 1157 * requested rotational position and proceeding around. 1158 */ 1159 cylbp = cg_blks(fs, cgp, cylno); 1160 pos = cbtorpos(fs, bpref); 1161 for (i = pos; i < fs->fs_nrpos; i++) 1162 if (cylbp[i] > 0) 1163 break; 1164 if (i == fs->fs_nrpos) 1165 for (i = 0; i < pos; i++) 1166 if (cylbp[i] > 0) 1167 break; 1168 if (cylbp[i] > 0) { 1169 /* 1170 * found a rotational position, now find the actual 1171 * block. A panic if none is actually there. 1172 */ 1173 pos = cylno % fs->fs_cpc; 1174 bno = (cylno - pos) * fs->fs_spc / NSPB(fs); 1175 if (fs_postbl(fs, pos)[i] == -1) { 1176 kprintf("pos = %d, i = %d, fs = %s\n", 1177 pos, i, fs->fs_fsmnt); 1178 panic("ffs_alloccgblk: cyl groups corrupted"); 1179 } 1180 for (i = fs_postbl(fs, pos)[i];; ) { 1181 if (ffs_isblock(fs, blksfree, bno + i)) { 1182 bno = blkstofrags(fs, (bno + i)); 1183 goto gotit; 1184 } 1185 delta = fs_rotbl(fs)[i]; 1186 if (delta <= 0 || 1187 delta + i > fragstoblks(fs, fs->fs_fpg)) 1188 break; 1189 i += delta; 1190 } 1191 kprintf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt); 1192 panic("ffs_alloccgblk: can't find blk in cyl"); 1193 } 1194 norot: 1195 /* 1196 * no blocks in the requested cylinder, so take next 1197 * available one in this cylinder group. 1198 */ 1199 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 1200 if (bno < 0) 1201 return (0); 1202 cgp->cg_rotor = bno; 1203 gotit: 1204 blkno = fragstoblks(fs, bno); 1205 ffs_clrblock(fs, blksfree, (long)blkno); 1206 ffs_clusteracct(fs, cgp, blkno, -1); 1207 cgp->cg_cs.cs_nbfree--; 1208 fs->fs_cstotal.cs_nbfree--; 1209 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; 1210 cylno = cbtocylno(fs, bno); 1211 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--; 1212 cg_blktot(cgp)[cylno]--; 1213 fs->fs_fmod = 1; 1214 blkno = cgp->cg_cgx * fs->fs_fpg + bno; 1215 if (DOINGSOFTDEP(ITOV(ip))) 1216 softdep_setup_blkmapdep(bp, fs, blkno); 1217 return (blkno); 1218 } 1219 1220 /* 1221 * Determine whether a cluster can be allocated. 1222 * 1223 * We do not currently check for optimal rotational layout if there 1224 * are multiple choices in the same cylinder group. Instead we just 1225 * take the first one that we find following bpref. 1226 */ 1227 static ufs_daddr_t 1228 ffs_clusteralloc(struct inode *ip, int cg, ufs_daddr_t bpref, int len) 1229 { 1230 struct fs *fs; 1231 struct cg *cgp; 1232 struct buf *bp; 1233 int i, got, run, bno, bit, map; 1234 u_char *mapp; 1235 int32_t *lp; 1236 uint8_t *blksfree; 1237 1238 fs = ip->i_fs; 1239 if (fs->fs_maxcluster[cg] < len) 1240 return (0); 1241 if (bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 1242 (int)fs->fs_cgsize, &bp)) { 1243 goto fail; 1244 } 1245 cgp = (struct cg *)bp->b_data; 1246 if (!cg_chkmagic(cgp)) 1247 goto fail; 1248 1249 /* 1250 * Check to see if a cluster of the needed size (or bigger) is 1251 * available in this cylinder group. 1252 */ 1253 lp = &cg_clustersum(cgp)[len]; 1254 for (i = len; i <= fs->fs_contigsumsize; i++) 1255 if (*lp++ > 0) 1256 break; 1257 if (i > fs->fs_contigsumsize) { 1258 /* 1259 * This is the first time looking for a cluster in this 1260 * cylinder group. Update the cluster summary information 1261 * to reflect the true maximum sized cluster so that 1262 * future cluster allocation requests can avoid reading 1263 * the cylinder group map only to find no clusters. 1264 */ 1265 lp = &cg_clustersum(cgp)[len - 1]; 1266 for (i = len - 1; i > 0; i--) 1267 if (*lp-- > 0) 1268 break; 1269 fs->fs_maxcluster[cg] = i; 1270 goto fail; 1271 } 1272 /* 1273 * Search the cluster map to find a big enough cluster. 1274 * We take the first one that we find, even if it is larger 1275 * than we need as we prefer to get one close to the previous 1276 * block allocation. We do not search before the current 1277 * preference point as we do not want to allocate a block 1278 * that is allocated before the previous one (as we will 1279 * then have to wait for another pass of the elevator 1280 * algorithm before it will be read). We prefer to fail and 1281 * be recalled to try an allocation in the next cylinder group. 1282 */ 1283 if (dtog(fs, bpref) != cg) 1284 bpref = 0; 1285 else 1286 bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref))); 1287 mapp = &cg_clustersfree(cgp)[bpref / NBBY]; 1288 map = *mapp++; 1289 bit = 1 << (bpref % NBBY); 1290 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) { 1291 if ((map & bit) == 0) { 1292 run = 0; 1293 } else { 1294 run++; 1295 if (run == len) 1296 break; 1297 } 1298 if ((got & (NBBY - 1)) != (NBBY - 1)) { 1299 bit <<= 1; 1300 } else { 1301 map = *mapp++; 1302 bit = 1; 1303 } 1304 } 1305 if (got >= cgp->cg_nclusterblks) 1306 goto fail; 1307 /* 1308 * Allocate the cluster that we have found. 1309 */ 1310 blksfree = cg_blksfree(cgp); 1311 for (i = 1; i <= len; i++) { 1312 if (!ffs_isblock(fs, blksfree, got - run + i)) 1313 panic("ffs_clusteralloc: map mismatch"); 1314 } 1315 bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1); 1316 if (dtog(fs, bno) != cg) 1317 panic("ffs_clusteralloc: allocated out of group"); 1318 len = blkstofrags(fs, len); 1319 for (i = 0; i < len; i += fs->fs_frag) { 1320 if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i) 1321 panic("ffs_clusteralloc: lost block"); 1322 } 1323 bdwrite(bp); 1324 return (bno); 1325 1326 fail: 1327 brelse(bp); 1328 return (0); 1329 } 1330 1331 /* 1332 * Determine whether an inode can be allocated. 1333 * 1334 * Check to see if an inode is available, and if it is, 1335 * allocate it using the following policy: 1336 * 1) allocate the requested inode. 1337 * 2) allocate the next available inode after the requested 1338 * inode in the specified cylinder group. 1339 * 3) the inode must not already be in the inode hash table. We 1340 * can encounter such a case because the vnode reclamation sequence 1341 * frees the bit 1342 * 3) the inode must not already be in the inode hash, otherwise it 1343 * may be in the process of being deallocated. This can occur 1344 * because the bitmap is updated before the inode is removed from 1345 * hash. If we were to reallocate the inode the caller could wind 1346 * up returning a vnode/inode combination which is in an indeterminate 1347 * state. 1348 */ 1349 static ino_t 1350 ffs_nodealloccg(struct inode *ip, int cg, ufs_daddr_t ipref, int mode) 1351 { 1352 struct fs *fs; 1353 struct cg *cgp; 1354 struct buf *bp; 1355 uint8_t *inosused; 1356 uint8_t map; 1357 int error, len, arraysize, i; 1358 int icheckmiss; 1359 ufs_daddr_t ibase; 1360 1361 fs = ip->i_fs; 1362 if (fs->fs_cs(fs, cg).cs_nifree == 0) 1363 return (0); 1364 error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 1365 (int)fs->fs_cgsize, &bp); 1366 if (error) { 1367 brelse(bp); 1368 return (0); 1369 } 1370 cgp = (struct cg *)bp->b_data; 1371 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) { 1372 brelse(bp); 1373 return (0); 1374 } 1375 inosused = cg_inosused(cgp); 1376 icheckmiss = 0; 1377 1378 /* 1379 * Quick check, reuse the most recently free inode or continue 1380 * a scan from where we left off the last time. 1381 */ 1382 ibase = cg * fs->fs_ipg; 1383 if (ipref) { 1384 ipref %= fs->fs_ipg; 1385 if (isclr(inosused, ipref)) { 1386 if (ufs_ihashcheck(ip->i_dev, ibase + ipref) == 0) 1387 goto gotit; 1388 } 1389 } 1390 1391 /* 1392 * Scan the inode bitmap starting at irotor, be sure to handle 1393 * the edge case by going back to the beginning of the array. 1394 * 1395 * If the number of inodes is not byte-aligned, the unused bits 1396 * should be set to 1. This will be sanity checked in gotit. Note 1397 * that we have to be sure not to overlap the beginning and end 1398 * when irotor is in the middle of a byte as this will cause the 1399 * same bitmap byte to be checked twice. To solve this problem we 1400 * just convert everything to a byte index for the loop. 1401 */ 1402 ipref = (cgp->cg_irotor % fs->fs_ipg) >> 3; /* byte index */ 1403 len = (fs->fs_ipg + 7) >> 3; /* byte size */ 1404 arraysize = len; 1405 1406 while (len > 0) { 1407 map = inosused[ipref]; 1408 if (map != 255) { 1409 for (i = 0; i < NBBY; ++i) { 1410 /* 1411 * If we find a free bit we have to make sure 1412 * that the inode is not in the middle of 1413 * being destroyed. The inode should not exist 1414 * in the inode hash. 1415 * 1416 * Adjust the rotor to try to hit the 1417 * quick-check up above. 1418 */ 1419 if ((map & (1 << i)) == 0) { 1420 if (ufs_ihashcheck(ip->i_dev, ibase + (ipref << 3) + i) == 0) { 1421 ipref = (ipref << 3) + i; 1422 cgp->cg_irotor = (ipref + 1) % fs->fs_ipg; 1423 goto gotit; 1424 } 1425 ++icheckmiss; 1426 } 1427 } 1428 } 1429 1430 /* 1431 * Setup for the next byte, start at the beginning again if 1432 * we hit the end of the array. 1433 */ 1434 if (++ipref == arraysize) 1435 ipref = 0; 1436 --len; 1437 } 1438 if (icheckmiss == cgp->cg_cs.cs_nifree) { 1439 brelse(bp); 1440 return(0); 1441 } 1442 kprintf("fs = %s\n", fs->fs_fsmnt); 1443 panic("ffs_nodealloccg: block not in map, icheckmiss/nfree %d/%d", 1444 icheckmiss, cgp->cg_cs.cs_nifree); 1445 /* NOTREACHED */ 1446 1447 /* 1448 * ipref is a bit index as of the gotit label. 1449 */ 1450 gotit: 1451 KKASSERT(ipref >= 0 && ipref < fs->fs_ipg); 1452 cgp->cg_time = time_second; 1453 if (DOINGSOFTDEP(ITOV(ip))) 1454 softdep_setup_inomapdep(bp, ip, ibase + ipref); 1455 setbit(inosused, ipref); 1456 cgp->cg_cs.cs_nifree--; 1457 fs->fs_cstotal.cs_nifree--; 1458 fs->fs_cs(fs, cg).cs_nifree--; 1459 fs->fs_fmod = 1; 1460 if ((mode & IFMT) == IFDIR) { 1461 cgp->cg_cs.cs_ndir++; 1462 fs->fs_cstotal.cs_ndir++; 1463 fs->fs_cs(fs, cg).cs_ndir++; 1464 } 1465 bdwrite(bp); 1466 return (ibase + ipref); 1467 } 1468 1469 /* 1470 * Free a block or fragment. 1471 * 1472 * The specified block or fragment is placed back in the 1473 * free map. If a fragment is deallocated, a possible 1474 * block reassembly is checked. 1475 */ 1476 void 1477 ffs_blkfree(struct inode *ip, ufs_daddr_t bno, long size) 1478 { 1479 struct fs *fs; 1480 struct cg *cgp; 1481 struct buf *bp; 1482 ufs_daddr_t blkno; 1483 int i, error, cg, blk, frags, bbase; 1484 uint8_t *blksfree; 1485 1486 fs = ip->i_fs; 1487 VOP_FREEBLKS(ip->i_devvp, fsbtodoff(fs, bno), size); 1488 if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0 || 1489 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { 1490 kprintf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n", 1491 devtoname(ip->i_dev), (long)bno, (long)fs->fs_bsize, size, 1492 fs->fs_fsmnt); 1493 panic("ffs_blkfree: bad size"); 1494 } 1495 cg = dtog(fs, bno); 1496 if ((uint)bno >= fs->fs_size) { 1497 kprintf("bad block %ld, ino %lu\n", 1498 (long)bno, (u_long)ip->i_number); 1499 ffs_fserr(fs, ip->i_uid, "bad block"); 1500 return; 1501 } 1502 1503 /* 1504 * Load the cylinder group 1505 */ 1506 error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 1507 (int)fs->fs_cgsize, &bp); 1508 if (error) { 1509 brelse(bp); 1510 return; 1511 } 1512 cgp = (struct cg *)bp->b_data; 1513 if (!cg_chkmagic(cgp)) { 1514 brelse(bp); 1515 return; 1516 } 1517 cgp->cg_time = time_second; 1518 bno = dtogd(fs, bno); 1519 blksfree = cg_blksfree(cgp); 1520 1521 if (size == fs->fs_bsize) { 1522 /* 1523 * Free a whole block 1524 */ 1525 blkno = fragstoblks(fs, bno); 1526 if (!ffs_isfreeblock(fs, blksfree, blkno)) { 1527 kprintf("dev = %s, block = %ld, fs = %s\n", 1528 devtoname(ip->i_dev), (long)bno, fs->fs_fsmnt); 1529 panic("ffs_blkfree: freeing free block"); 1530 } 1531 ffs_setblock(fs, blksfree, blkno); 1532 ffs_clusteracct(fs, cgp, blkno, 1); 1533 cgp->cg_cs.cs_nbfree++; 1534 fs->fs_cstotal.cs_nbfree++; 1535 fs->fs_cs(fs, cg).cs_nbfree++; 1536 i = cbtocylno(fs, bno); 1537 cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++; 1538 cg_blktot(cgp)[i]++; 1539 } else { 1540 /* 1541 * Free a fragment within a block. 1542 * 1543 * bno is the starting block number of the fragment being 1544 * freed. 1545 * 1546 * bbase is the starting block number for the filesystem 1547 * block containing the fragment. 1548 * 1549 * blk is the current bitmap for the fragments within the 1550 * filesystem block containing the fragment. 1551 * 1552 * frags is the number of fragments being freed 1553 * 1554 * Call ffs_fragacct() to account for the removal of all 1555 * current fragments, then adjust the bitmap to free the 1556 * requested fragment, and finally call ffs_fragacct() again 1557 * to regenerate the accounting. 1558 */ 1559 bbase = bno - fragnum(fs, bno); 1560 blk = blkmap(fs, blksfree, bbase); 1561 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 1562 frags = numfrags(fs, size); 1563 for (i = 0; i < frags; i++) { 1564 if (isset(blksfree, bno + i)) { 1565 kprintf("dev = %s, block = %ld, fs = %s\n", 1566 devtoname(ip->i_dev), (long)(bno + i), 1567 fs->fs_fsmnt); 1568 panic("ffs_blkfree: freeing free frag"); 1569 } 1570 setbit(blksfree, bno + i); 1571 } 1572 cgp->cg_cs.cs_nffree += i; 1573 fs->fs_cstotal.cs_nffree += i; 1574 fs->fs_cs(fs, cg).cs_nffree += i; 1575 1576 /* 1577 * Add back in counts associated with the new frags 1578 */ 1579 blk = blkmap(fs, blksfree, bbase); 1580 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 1581 1582 /* 1583 * If a complete block has been reassembled, account for it 1584 */ 1585 blkno = fragstoblks(fs, bbase); 1586 if (ffs_isblock(fs, blksfree, blkno)) { 1587 cgp->cg_cs.cs_nffree -= fs->fs_frag; 1588 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 1589 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 1590 ffs_clusteracct(fs, cgp, blkno, 1); 1591 cgp->cg_cs.cs_nbfree++; 1592 fs->fs_cstotal.cs_nbfree++; 1593 fs->fs_cs(fs, cg).cs_nbfree++; 1594 i = cbtocylno(fs, bbase); 1595 cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++; 1596 cg_blktot(cgp)[i]++; 1597 } 1598 } 1599 fs->fs_fmod = 1; 1600 bdwrite(bp); 1601 } 1602 1603 #ifdef DIAGNOSTIC 1604 /* 1605 * Verify allocation of a block or fragment. Returns true if block or 1606 * fragment is allocated, false if it is free. 1607 */ 1608 static int 1609 ffs_checkblk(struct inode *ip, ufs_daddr_t bno, long size) 1610 { 1611 struct fs *fs; 1612 struct cg *cgp; 1613 struct buf *bp; 1614 int i, error, frags, free; 1615 uint8_t *blksfree; 1616 1617 fs = ip->i_fs; 1618 if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) { 1619 kprintf("bsize = %ld, size = %ld, fs = %s\n", 1620 (long)fs->fs_bsize, size, fs->fs_fsmnt); 1621 panic("ffs_checkblk: bad size"); 1622 } 1623 if ((uint)bno >= fs->fs_size) 1624 panic("ffs_checkblk: bad block %d", bno); 1625 error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, dtog(fs, bno))), 1626 (int)fs->fs_cgsize, &bp); 1627 if (error) 1628 panic("ffs_checkblk: cg bread failed"); 1629 cgp = (struct cg *)bp->b_data; 1630 if (!cg_chkmagic(cgp)) 1631 panic("ffs_checkblk: cg magic mismatch"); 1632 blksfree = cg_blksfree(cgp); 1633 bno = dtogd(fs, bno); 1634 if (size == fs->fs_bsize) { 1635 free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno)); 1636 } else { 1637 frags = numfrags(fs, size); 1638 for (free = 0, i = 0; i < frags; i++) 1639 if (isset(blksfree, bno + i)) 1640 free++; 1641 if (free != 0 && free != frags) 1642 panic("ffs_checkblk: partially free fragment"); 1643 } 1644 brelse(bp); 1645 return (!free); 1646 } 1647 #endif /* DIAGNOSTIC */ 1648 1649 /* 1650 * Free an inode. 1651 */ 1652 int 1653 ffs_vfree(struct vnode *pvp, ino_t ino, int mode) 1654 { 1655 if (DOINGSOFTDEP(pvp)) { 1656 softdep_freefile(pvp, ino, mode); 1657 return (0); 1658 } 1659 return (ffs_freefile(pvp, ino, mode)); 1660 } 1661 1662 /* 1663 * Do the actual free operation. 1664 * The specified inode is placed back in the free map. 1665 */ 1666 int 1667 ffs_freefile(struct vnode *pvp, ino_t ino, int mode) 1668 { 1669 struct fs *fs; 1670 struct cg *cgp; 1671 struct inode *pip; 1672 struct buf *bp; 1673 int error, cg; 1674 uint8_t *inosused; 1675 1676 pip = VTOI(pvp); 1677 fs = pip->i_fs; 1678 if ((uint)ino >= fs->fs_ipg * fs->fs_ncg) 1679 panic("ffs_vfree: range: dev = (%d,%d), ino = %"PRId64", fs = %s", 1680 major(pip->i_dev), minor(pip->i_dev), ino, fs->fs_fsmnt); 1681 cg = ino_to_cg(fs, ino); 1682 error = bread(pip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)), 1683 (int)fs->fs_cgsize, &bp); 1684 if (error) { 1685 brelse(bp); 1686 return (error); 1687 } 1688 cgp = (struct cg *)bp->b_data; 1689 if (!cg_chkmagic(cgp)) { 1690 brelse(bp); 1691 return (0); 1692 } 1693 cgp->cg_time = time_second; 1694 inosused = cg_inosused(cgp); 1695 ino %= fs->fs_ipg; 1696 if (isclr(inosused, ino)) { 1697 kprintf("dev = %s, ino = %lu, fs = %s\n", 1698 devtoname(pip->i_dev), (u_long)ino, fs->fs_fsmnt); 1699 if (fs->fs_ronly == 0) 1700 panic("ffs_vfree: freeing free inode"); 1701 } 1702 clrbit(inosused, ino); 1703 if (ino < cgp->cg_irotor) 1704 cgp->cg_irotor = ino; 1705 cgp->cg_cs.cs_nifree++; 1706 fs->fs_cstotal.cs_nifree++; 1707 fs->fs_cs(fs, cg).cs_nifree++; 1708 if ((mode & IFMT) == IFDIR) { 1709 cgp->cg_cs.cs_ndir--; 1710 fs->fs_cstotal.cs_ndir--; 1711 fs->fs_cs(fs, cg).cs_ndir--; 1712 } 1713 fs->fs_fmod = 1; 1714 bdwrite(bp); 1715 return (0); 1716 } 1717 1718 /* 1719 * Find a block of the specified size in the specified cylinder group. 1720 * 1721 * It is a panic if a request is made to find a block if none are 1722 * available. 1723 */ 1724 static ufs_daddr_t 1725 ffs_mapsearch(struct fs *fs, struct cg *cgp, ufs_daddr_t bpref, int allocsiz) 1726 { 1727 ufs_daddr_t bno; 1728 int start, len, loc, i; 1729 int blk, field, subfield, pos; 1730 uint8_t *blksfree; 1731 1732 /* 1733 * find the fragment by searching through the free block 1734 * map for an appropriate bit pattern. 1735 */ 1736 if (bpref) 1737 start = dtogd(fs, bpref) / NBBY; 1738 else 1739 start = cgp->cg_frotor / NBBY; 1740 blksfree = cg_blksfree(cgp); 1741 len = howmany(fs->fs_fpg, NBBY) - start; 1742 loc = scanc((uint)len, (u_char *)&blksfree[start], 1743 (u_char *)fragtbl[fs->fs_frag], 1744 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 1745 if (loc == 0) { 1746 len = start + 1; /* XXX why overlap here? */ 1747 start = 0; 1748 loc = scanc((uint)len, (u_char *)&blksfree[0], 1749 (u_char *)fragtbl[fs->fs_frag], 1750 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 1751 if (loc == 0) { 1752 kprintf("start = %d, len = %d, fs = %s\n", 1753 start, len, fs->fs_fsmnt); 1754 panic("ffs_alloccg: map corrupted"); 1755 /* NOTREACHED */ 1756 } 1757 } 1758 bno = (start + len - loc) * NBBY; 1759 cgp->cg_frotor = bno; 1760 /* 1761 * found the byte in the map 1762 * sift through the bits to find the selected frag 1763 */ 1764 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 1765 blk = blkmap(fs, blksfree, bno); 1766 blk <<= 1; 1767 field = around[allocsiz]; 1768 subfield = inside[allocsiz]; 1769 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 1770 if ((blk & field) == subfield) 1771 return (bno + pos); 1772 field <<= 1; 1773 subfield <<= 1; 1774 } 1775 } 1776 kprintf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt); 1777 panic("ffs_alloccg: block not in map"); 1778 return (-1); 1779 } 1780 1781 /* 1782 * Update the cluster map because of an allocation or free. 1783 * 1784 * Cnt == 1 means free; cnt == -1 means allocating. 1785 */ 1786 static void 1787 ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs_daddr_t blkno, int cnt) 1788 { 1789 int32_t *sump; 1790 int32_t *lp; 1791 u_char *freemapp, *mapp; 1792 int i, start, end, forw, back, map, bit; 1793 1794 if (fs->fs_contigsumsize <= 0) 1795 return; 1796 freemapp = cg_clustersfree(cgp); 1797 sump = cg_clustersum(cgp); 1798 /* 1799 * Allocate or clear the actual block. 1800 */ 1801 if (cnt > 0) 1802 setbit(freemapp, blkno); 1803 else 1804 clrbit(freemapp, blkno); 1805 /* 1806 * Find the size of the cluster going forward. 1807 */ 1808 start = blkno + 1; 1809 end = start + fs->fs_contigsumsize; 1810 if (end >= cgp->cg_nclusterblks) 1811 end = cgp->cg_nclusterblks; 1812 mapp = &freemapp[start / NBBY]; 1813 map = *mapp++; 1814 bit = 1 << (start % NBBY); 1815 for (i = start; i < end; i++) { 1816 if ((map & bit) == 0) 1817 break; 1818 if ((i & (NBBY - 1)) != (NBBY - 1)) { 1819 bit <<= 1; 1820 } else { 1821 map = *mapp++; 1822 bit = 1; 1823 } 1824 } 1825 forw = i - start; 1826 /* 1827 * Find the size of the cluster going backward. 1828 */ 1829 start = blkno - 1; 1830 end = start - fs->fs_contigsumsize; 1831 if (end < 0) 1832 end = -1; 1833 mapp = &freemapp[start / NBBY]; 1834 map = *mapp--; 1835 bit = 1 << (start % NBBY); 1836 for (i = start; i > end; i--) { 1837 if ((map & bit) == 0) 1838 break; 1839 if ((i & (NBBY - 1)) != 0) { 1840 bit >>= 1; 1841 } else { 1842 map = *mapp--; 1843 bit = 1 << (NBBY - 1); 1844 } 1845 } 1846 back = start - i; 1847 /* 1848 * Account for old cluster and the possibly new forward and 1849 * back clusters. 1850 */ 1851 i = back + forw + 1; 1852 if (i > fs->fs_contigsumsize) 1853 i = fs->fs_contigsumsize; 1854 sump[i] += cnt; 1855 if (back > 0) 1856 sump[back] -= cnt; 1857 if (forw > 0) 1858 sump[forw] -= cnt; 1859 /* 1860 * Update cluster summary information. 1861 */ 1862 lp = &sump[fs->fs_contigsumsize]; 1863 for (i = fs->fs_contigsumsize; i > 0; i--) 1864 if (*lp-- > 0) 1865 break; 1866 fs->fs_maxcluster[cgp->cg_cgx] = i; 1867 } 1868 1869 /* 1870 * Fserr prints the name of a filesystem with an error diagnostic. 1871 * 1872 * The form of the error message is: 1873 * fs: error message 1874 */ 1875 static void 1876 ffs_fserr(struct fs *fs, uint uid, char *cp) 1877 { 1878 struct thread *td = curthread; 1879 struct proc *p; 1880 1881 if ((p = td->td_proc) != NULL) { 1882 log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1, 1883 p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp); 1884 } else { 1885 log(LOG_ERR, "system thread %p, uid %d on %s: %s\n", 1886 td, uid, fs->fs_fsmnt, cp); 1887 } 1888 } 1889