/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_alloc.c,v 1.64.2.2 2001/09/21 19:15:21 dillon Exp $
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/taskqueue.h>
#include <machine/inttypes.h>

#include <sys/buf2.h>

#include "quota.h"
#include "inode.h"
#include "ufs_extern.h"
#include "ufsmount.h"

#include "fs.h"
#include "ffs_extern.h"

typedef ufs_daddr_t allocfcn_t (struct inode *ip, int cg, ufs_daddr_t bpref,
				int size);

static ufs_daddr_t ffs_alloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t
	    ffs_alloccgblk (struct inode *, struct buf *, ufs_daddr_t);
static void ffs_blkfree_cg(struct fs *, struct vnode *, cdev_t, ino_t,
	    uint32_t, ufs_daddr_t, long);
#ifdef DIAGNOSTIC
static int ffs_checkblk (struct inode *, ufs_daddr_t, long);
#endif
static void ffs_clusteracct (struct fs *, struct cg *, ufs_daddr_t, int);
static ufs_daddr_t ffs_clusteralloc (struct inode *, int, ufs_daddr_t, int);
static ino_t ffs_dirpref (struct inode *);
static ufs_daddr_t ffs_fragextend (struct inode *, int, long, int, int);
static void ffs_fserr (struct fs *, uint, char *);
static u_long ffs_hashalloc
	    (struct inode *, int, long, int, allocfcn_t *);
static ino_t ffs_nodealloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_mapsearch (struct fs *, struct cg *, ufs_daddr_t, int);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size,
	  struct ucred *cred, ufs_daddr_t *bnp)
{
	struct fs *fs;
	ufs_daddr_t bno;
	int cg;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		kprintf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
#ifdef QUOTA
	error = ufs_chkdq(ip, (long)btodb(size), cred, 0);
	if (error)
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
					 ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
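
/*
 * Worked example (hypothetical numbers, compiled out) of the fs_minfree
 * reserve check performed above: non-root allocations are refused once
 * they would dig into the administrative reserve, while root may still
 * consume it.
 */
#if 0
static int
minfree_example(void)
{
	long dsize = 1000000;		/* fs_dsize, in fragments */
	int minfree = 8;		/* fs_minfree, in percent */
	long nfree = 81000;		/* free fragments on the volume */
	long reserve = dsize * minfree / 100;	/* 80000 frags */
	long headroom = nfree - reserve;	/* 1000 frags */
	int reqfrags = 16;		/* e.g. a 16KB write with 1KB frags */

	/* corresponds to freespace(fs, fs->fs_minfree) - numfrags(fs, size) */
	return (headroom - reqfrags >= 0);	/* 1: allowed for non-root */
}
#endif
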
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified.  The allocator attempts to extend
 * the original block.  Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(struct inode *ip, ufs_daddr_t lbprev, ufs_daddr_t bpref,
	      int osize, int nsize, struct ucred *cred, struct buf **bpp)
{
	struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = NULL;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((uint)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (uint)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		kprintf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		kprintf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(ITOV(ip), lblktodoff(fs, lbprev), osize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_bio2.bio_offset == NOOFFSET) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_bio2.bio_offset = fsbtodoff(fs, bprev);
	}

#ifdef QUOTA
	error = ufs_chkdq(ip, (long)btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno) {
		if (bp->b_bio2.bio_offset != fsbtodoff(fs, bno))
			panic("ffs_realloccg: bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bzero((char *)bp->b_data + osize, (uint)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused
		 * portion.  If the file continues to grow, the
		 * `ffs_fragextend' call above will be able to grow it in
		 * place without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		kprintf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
					 ffs_alloccg);
	if (bno > 0) {
		bp->b_bio2.bio_offset = fsbtodoff(fs, bno);
		if (!DOINGSOFTDEP(ITOV(ip)))
			ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
				    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bzero((char *)bp->b_data + osize, (uint)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
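
/*
 * Worked example (hypothetical sizes, compiled out) of the hysteresis in
 * the FS_OPTSPACE/FS_OPTTIME switch above, for fs_dsize = 1000000 frags
 * and fs_minfree = 8%:
 *
 *	SPACE -> TIME once cs_nffree <= 1000000 * 8 / 200     = 40000 frags
 *	    (fragmented free space has shrunk to half the reserve)
 *	TIME -> SPACE once cs_nffree >= 1000000 * (8 - 2)/100 = 60000 frags
 *	    (fragmented free space has grown to within 2% of the reserve)
 *
 * The gap between the two thresholds keeps the policy from oscillating
 * on every reallocation.
 */
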
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW,
	   &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW,
	   &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

/*
 * ffs_reallocblks(struct vnode *a_vp, struct cluster_save *a_buflist)
 */
int
ffs_reallocblks(struct vop_reallocblks_args *ap)
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap = NULL;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno;
#ifdef DIAGNOSTIC
	off_t boffset;
#endif
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, slen, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = lblkno(fs, buflist->bs_children[0]->b_loffset);
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
		    fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++) {
		if (buflist->bs_children[i]->b_loffset !=
		    lblktodoff(fs, start_lbn) + lblktodoff(fs, i))
			panic("ffs_reallocblks: non-logical cluster");
	}
	boffset = buflist->bs_children[0]->b_bio2.bio_offset;
	ssize = (int)fsbtodoff(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_bio2.bio_offset !=
		    boffset + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dofftofsb(fs, buflist->bs_children[0]->b_bio2.bio_offset)) !=
	    dtog(fs, dofftofsb(fs, buflist->bs_children[len - 1]->b_bio2.bio_offset)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block and
	 * the number of blocks that will fit into sbap starting at soff.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
		slen = NDADDR - soff;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, lblktodoff(fs, idp->in_lbn),
			  (int)fs->fs_bsize, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
		slen = fs->fs_nindir - soff;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);

	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, lblktodoff(fs, idp->in_lbn),
			  (int)fs->fs_bsize, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}

	/*
	 * Make sure we aren't spanning more than two blockmaps.  ssize is
	 * our calculation of the span we have to scan in the first blockmap,
	 * while slen is our calculation of the number of entries available
	 * in the first blockmap (from soff).
	 */
	if (ssize > slen) {
		panic("ffs_reallocblks: range spans more than two blockmaps!"
		      " start_lbn %ld len %d (%d/%d)",
		      (long)start_lbn, len, slen, ssize);
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref),
	    (long)pref, len, ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		kprintf("realloc: ino %ju, lbns %d-%d\n\told:",
		    (uintmax_t)ip->i_number, start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
		    fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			kprintf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		kprintf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp) &&
		    buflist->bs_children[i]->b_bio2.bio_offset != NOOFFSET) {
			ffs_blkfree(ip,
			    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
			    fs->fs_bsize);
		}
		buflist->bs_children[i]->b_bio2.bio_offset =
		    fsbtodoff(fs, blkno);
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
		    fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			kprintf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		kprintf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another in the
	 * same cg without intervening files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
				   (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, NULL, ino, vpp);
	if (error) {
		ffs_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		kprintf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		kprintf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = krandom() / 2 + 1;
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
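
/*
 * Compiled-out sketch of the generation-number rule used at the end of
 * ffs_valloc() above: i_gen must never be zero, either for a freshly
 * created inode or when the increment wraps, because NFS-style file
 * handles rely on it to detect stale inodes.
 */
#if 0
static u_int32_t
next_generation(u_int32_t gen, u_int32_t rnd)
{
	/* rnd stands in for krandom(); the +1 forces a nonzero seed */
	if (gen == 0 || ++gen == 0)
		gen = rnd / 2 + 1;
	return (gen);
}
#endif
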
/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its file inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(struct inode *pip)
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	int64_t dirsize64;
	int avgifree, avgbfree, avgndir, curdirsize;
	int minifree, minbfree, maxndir;
	int mincg, minndir;
	int maxcontigdirs;

	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	if (ITOV(pip)->v_flag & VROOT) {
		prefcg = karc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Count various limits used for the optimal allocation of a
	 * directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;

	/*
	 * fs_avgfilesize and fs_avgfpdir are user-settable entities and
	 * multiplying them may overflow a 32 bit integer.
	 */
	dirsize64 = fs->fs_avgfilesize * (int64_t)fs->fs_avgfpdir;
	if (dirsize64 > 0x7fffffff) {
		maxcontigdirs = 1;
	} else {
		dirsize = (int)dirsize64;
		curdirsize = avgndir ?
			(cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
		if (dirsize < curdirsize)
			dirsize = curdirsize;
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
		if (fs->fs_avgfpdir > 0)
			maxcontigdirs = min(maxcontigdirs,
					    fs->fs_ipg / fs->fs_avgfpdir);
		if (maxcontigdirs == 0)
			maxcontigdirs = 1;
	}

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
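
/*
 * Worked example (hypothetical tuning values, compiled out) of the
 * maxcontigdirs computation in ffs_dirpref() above:
 */
#if 0
static int
maxcontigdirs_example(void)
{
	int64_t avgfilesize = 16384;	/* fs_avgfilesize */
	int64_t avgfpdir = 64;		/* fs_avgfpdir */
	int64_t dirsize64 = avgfilesize * avgfpdir;	/* 1MB per dir */
	long avgbfree = 2048;		/* free blocks per cg */
	long bsize = 8192;		/* fs_bsize */

	if (dirsize64 > 0x7fffffff)	/* 32-bit overflow guard */
		return (1);
	/*
	 * 2048 * 8192 / 1048576 = 16: pack at most 16 directories into
	 * a cylinder group before the preference moves on (the real code
	 * further clamps this to 255 and to fs_ipg / fs_avgfpdir).
	 */
	return ((int)(avgbfree * bsize / dirsize64));
}
#endif
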
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(struct inode *ip, ufs_daddr_t lbn, int indx, ufs_daddr_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
	    bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	/*
	 * Here we convert ms of delay to frags as:
	 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
	 *		  ((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
			   (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
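
/*
 * Worked example (old-style disk geometry, compiled out) of the
 * fs_rotdelay conversion in ffs_blkpref() above:
 */
#if 0
static long
rotdelay_example(void)
{
	long rotdelay = 4;	/* fs_rotdelay, in milliseconds */
	long rps = 60;		/* fs_rps, revolutions per second */
	long nsect = 64;	/* fs_nsect, sectors per revolution */
	long nspf = 2;		/* NSPF(fs), sectors per fragment */
	long frag = 8;		/* fs_frag, fragments per block */
	long frags = rotdelay * rps * nsect / (nspf * 1000);	/* 7 */

	/* round up to a block boundary: skip 8 fragments */
	return (roundup(frags, frag));
}
#endif
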
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(struct inode *ip, int cg, long pref,
	      int size,	/* size for data blocks, mode for inodes */
	      allocfcn_t *allocator)
{
	struct fs *fs;
	long result;	/* XXX why not same type as we return? */
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
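
/*
 * Compiled-out sketch of the probe order produced by ffs_hashalloc()
 * above.  From preferred group icg the quadratic pass visits icg+1,
 * icg+3, icg+7, ... (mod fs_ncg), and the brute force pass then sweeps
 * the remaining groups linearly starting at icg+2.
 */
#if 0
static void
hashalloc_order_example(int icg, int ncg)
{
	int cg = icg, i;

	kprintf("preferred: %d\n", cg);
	for (i = 1; i < ncg; i *= 2) {		/* quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		kprintf("rehash:    %d\n", cg);
	}
	cg = (icg + 2) % ncg;			/* brute force sweep */
	for (i = 2; i < ncg; i++) {
		kprintf("sweep:     %d\n", cg);
		if (++cg == ncg)
			cg = 0;
	}
}
#endif
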
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(struct inode *ip, int cg, long bprev, int osize, int nsize)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	KKASSERT(blknum(fs, bprev) == blknum(fs, bprev + frags - 1));
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++) {
		if (isclr(blksfree, bno + i)) {
			brelse(bp);
			return (0);
		}
	}

	/*
	 * The current fragment can be extended:
	 *	deduct the count on the fragment being extended into,
	 *	increase the count on the remaining fragment (if any),
	 *	allocate the extended piece.
	 *
	 *	---oooooooooonnnnnnn111----
	 *	   [-----frags-----]
	 *	   ^               ^
	 *	   bbase           fs_frag
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++) {
		if (isclr(blksfree, bno + i))
			break;
	}

	/*
	 * Size of original free frag is [i - numfrags(fs, osize)]
	 * Size of remaining free frag is [i - frags]
	 */
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(struct inode *ip, int cg, ufs_daddr_t bpref, int size)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i;
	ufs_daddr_t bno, blkno;
	int allocsiz, error, frags;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * Check to see if any fragments of sufficient size are already
	 * available.  Fit the data into a larger fragment if necessary,
	 * before allocating a whole new block.
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) {
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	}
	if (allocsiz == fs->fs_frag) {
		/*
		 * No fragments were available, allocate a whole block and
		 * cut the requested fragment (of size frags) out of it.
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(blksfree, bpref + i);

		/*
		 * Calculate the number of free frags still remaining after
		 * we have cut out the requested allocation.  Indicate that
		 * a fragment of that size is now available for future
		 * allocation.
		 */
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}

	/*
	 * cg_frsum[] has told us that a free fragment of allocsiz size is
	 * available.  Find it, then clear the bitmap bits associated with
	 * the size we want.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;

	/*
	 * Account for the allocation.  The original searched size that we
	 * found is no longer available.  If we cut out a smaller piece then
	 * a smaller fragment is now available.
	 */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}
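
/*
 * Compiled-out illustration of the cg_frsum[] bookkeeping performed by
 * ffs_alloccg() above, for an 8-fragment block:
 */
#if 0
static void
frsum_example(int32_t *frsum)	/* cg_frsum[], indexed by free-run size */
{
	/*
	 * Case 1: no suitable fragment existed, so a whole block was
	 * split.  Cutting a 3-fragment request out of a fresh block
	 * leaves a single free run of 5 fragments.
	 */
	frsum[8 - 3]++;

	/*
	 * Case 2: ffs_mapsearch() found a free run of 4 for the same
	 * 3-fragment request: the run of 4 is consumed and a run of 1
	 * remains.
	 */
	frsum[4]--;
	frsum[4 - 3]++;
}
#endif
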
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, ufs_daddr_t bpref)
{
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	int i;
	uint8_t *blksfree;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			kprintf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, blksfree, bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		kprintf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(struct inode *ip, int cg, ufs_daddr_t bpref, int len)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	if (bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		  (int)fs->fs_cgsize, &bp)) {
		goto fail;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;

	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group.  Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++) {
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	}
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag) {
		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	}
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
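
/*
 * Compiled-out sketch of the bit-run scan used by ffs_clusteralloc()
 * above: walk the cluster free map, counting the current run of set
 * bits and stopping at the first run of the requested length (the real
 * loop additionally honours the bpref starting point).
 */
#if 0
static int
cluster_run_example(const u_char *map, int nblks, int len)
{
	int run = 0, i;

	for (i = 0; i < nblks; i++) {
		if (map[i / NBBY] & (1 << (i % NBBY)))
			run++;
		else
			run = 0;
		if (run == len)
			return (i - len + 1);	/* first block of the run */
	}
	return (-1);				/* no cluster found */
}
#endif
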
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 *   3) the inode must not already be in the inode hash, otherwise it
 *      may be in the process of being deallocated.  This can occur
 *      because the bitmap is updated before the inode is removed from
 *      the hash (the vnode reclamation sequence frees the bitmap bit
 *      first).  If we were to reallocate the inode the caller could
 *      wind up returning a vnode/inode combination which is in an
 *      indeterminate state.
 */
static ino_t
ffs_nodealloccg(struct inode *ip, int cg, ufs_daddr_t ipref, int mode)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *inosused;
	uint8_t map;
	int error, len, arraysize, i;
	int icheckmiss;
	ufs_daddr_t ibase;
	struct vnode *vp;

	vp = ITOV(ip);
	ump = VFSTOUFS(vp->v_mount);
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	inosused = cg_inosused(cgp);
	icheckmiss = 0;

	/*
	 * Quick check, reuse the most recently freed inode or continue
	 * a scan from where we left off the last time.
	 */
	ibase = cg * fs->fs_ipg;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref)) {
			if (ufs_ihashcheck(ump, ip->i_dev, ibase + ipref) == 0)
				goto gotit;
		}
	}

	/*
	 * Scan the inode bitmap starting at irotor, be sure to handle
	 * the edge case by going back to the beginning of the array.
	 *
	 * If the number of inodes is not byte-aligned, the unused bits
	 * should be set to 1.  This will be sanity checked in gotit.  Note
	 * that we have to be sure not to overlap the beginning and end
	 * when irotor is in the middle of a byte as this will cause the
	 * same bitmap byte to be checked twice.  To solve this problem we
	 * just convert everything to a byte index for the loop.
	 */
	ipref = (cgp->cg_irotor % fs->fs_ipg) >> 3;	/* byte index */
	len = (fs->fs_ipg + 7) >> 3;			/* byte size */
	arraysize = len;

	while (len > 0) {
		map = inosused[ipref];
		if (map != 255) {
			for (i = 0; i < NBBY; ++i) {
				/*
				 * If we find a free bit we have to make
				 * sure that the inode is not in the middle
				 * of being destroyed.  The inode should not
				 * exist in the inode hash.
				 *
				 * Adjust the rotor to try to hit the
				 * quick-check up above.
				 */
				if ((map & (1 << i)) == 0) {
					if (ufs_ihashcheck(ump, ip->i_dev,
					    ibase + (ipref << 3) + i) == 0) {
						ipref = (ipref << 3) + i;
						cgp->cg_irotor =
						    (ipref + 1) % fs->fs_ipg;
						goto gotit;
					}
					++icheckmiss;
				}
			}
		}

		/*
		 * Setup for the next byte, start at the beginning again if
		 * we hit the end of the array.
		 */
		if (++ipref == arraysize)
			ipref = 0;
		--len;
	}
	if (icheckmiss == cgp->cg_cs.cs_nifree) {
		brelse(bp);
		return (0);
	}
	kprintf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map, icheckmiss/nfree %d/%d",
	      icheckmiss, cgp->cg_cs.cs_nifree);
	/* NOTREACHED */

	/*
	 * ipref is a bit index as of the gotit label.
	 */
gotit:
	KKASSERT(ipref >= 0 && ipref < fs->fs_ipg);
	cgp->cg_time = time_second;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, ibase + ipref);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (ibase + ipref);
}
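
/*
 * Compiled-out sketch of the byte-at-a-time bitmap scan used by
 * ffs_nodealloccg() above: fully allocated bytes (0xff) are rejected
 * with a single compare, and only bytes containing a clear bit are
 * examined bit by bit (the real loop additionally rejects inodes that
 * are still present in the inode hash).
 */
#if 0
static int
inomap_scan_example(const uint8_t *inosused, int nbytes)
{
	int byte, bit;

	for (byte = 0; byte < nbytes; byte++) {
		if (inosused[byte] == 0xff)	/* no free inode here */
			continue;
		for (bit = 0; bit < NBBY; bit++) {
			if ((inosused[byte] & (1 << bit)) == 0)
				return (byte * NBBY + bit);
		}
	}
	return (-1);
}
#endif
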
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
static void
ffs_blkfree_cg(struct fs *fs, struct vnode *i_devvp, cdev_t i_dev,
	       ino_t i_number, uint32_t i_din_uid, ufs_daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
	uint8_t *blksfree;

	VOP_FREEBLKS(i_devvp, fsbtodoff(fs, bno), size);
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		kprintf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
		    devtoname(i_dev), (long)bno, (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((uint)bno >= fs->fs_size) {
		kprintf("bad block %ld, ino %lu\n",
		    (long)bno, (u_long)i_number);
		ffs_fserr(fs, i_din_uid, "bad block");
		return;
	}

	/*
	 * Load the cylinder group
	 */
	error = bread(i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);

	if (size == fs->fs_bsize) {
		/*
		 * Free a whole block
		 */
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, blksfree, blkno)) {
			kprintf("dev = %s, block = %ld, fs = %s\n",
			    devtoname(i_dev), (long)bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, blksfree, blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		/*
		 * Free a fragment within a block.
		 *
		 * bno is the starting block number of the fragment being
		 * freed.
		 *
		 * bbase is the starting block number for the filesystem
		 * block containing the fragment.
		 *
		 * blk is the current bitmap for the fragments within the
		 * filesystem block containing the fragment.
		 *
		 * frags is the number of fragments being freed
		 *
		 * Call ffs_fragacct() to account for the removal of all
		 * current fragments, then adjust the bitmap to free the
		 * requested fragment, and finally call ffs_fragacct() again
		 * to regenerate the accounting.
		 */
		bbase = bno - fragnum(fs, bno);
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(blksfree, bno + i)) {
				kprintf("dev = %s, block = %ld, fs = %s\n",
				    devtoname(i_dev), (long)(bno + i),
				    fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(blksfree, bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;

		/*
		 * Add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);

		/*
		 * If a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

struct ffs_blkfree_trim_params {
	struct task task;
	ufs_daddr_t bno;
	long size;

	/*
	 * With TRIM, the inode pointer is gone in the callback but we
	 * still need the following fields for ffs_blkfree_cg().
	 */
	struct vnode *i_devvp;
	struct fs *i_fs;
	cdev_t i_dev;
	ino_t i_number;
	uint32_t i_din_uid;
};

static void
ffs_blkfree_trim_task(void *ctx, int pending)
{
	struct ffs_blkfree_trim_params *tp;

	tp = ctx;
	ffs_blkfree_cg(tp->i_fs, tp->i_devvp, tp->i_dev, tp->i_number,
		       tp->i_din_uid, tp->bno, tp->size);
	kfree(tp, M_TEMP);
}

static void
ffs_blkfree_trim_completed(struct bio *biop)
{
	struct buf *bp = biop->bio_buf;
	struct ffs_blkfree_trim_params *tp;

	tp = bp->b_bio1.bio_caller_info1.ptr;
	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
	taskqueue_enqueue(taskqueue_swi, &tp->task);
	biodone(biop);
}
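
/*
 * Summary of the two ffs_blkfree() paths implemented below:
 *
 *	no TRIM: ffs_blkfree() -> ffs_blkfree_cg()
 *		 (bitmap updated immediately)
 *
 *	TRIM:	 ffs_blkfree() -> BUF_CMD_FREEBLKS I/O
 *		 -> ffs_blkfree_trim_completed()   (interrupt context)
 *		 -> taskqueue_swi -> ffs_blkfree_trim_task()
 *		 -> ffs_blkfree_cg()               (bitmap updated late)
 *
 * The completion handler runs in interrupt context, where the cylinder
 * group buffer must not be read in, so the bitmap update is deferred to
 * the taskqueue.
 */
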
/*
 * If TRIM is enabled, TRIM the blocks first and only free them (i.e.
 * update the bitmap) once the TRIM has finished and the callback handler
 * has run. If the bitmap were updated first, a block could be reallocated
 * and rewritten before its TRIM was actually issued, and the TRIM would
 * then destroy valid data.
 */
void
ffs_blkfree(struct inode *ip, ufs_daddr_t bno, long size)
{
	struct mount *mp = ip->i_devvp->v_mount;
	struct ffs_blkfree_trim_params *tp;
	struct buf *bp;

	if (!(mp->mnt_flag & MNT_TRIM)) {
		ffs_blkfree_cg(ip->i_fs, ip->i_devvp, ip->i_dev, ip->i_number,
			       ip->i_uid, bno, size);
		return;
	}

	tp = kmalloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
	tp->bno = bno;
	tp->i_fs = ip->i_fs;
	tp->i_devvp = ip->i_devvp;
	tp->i_dev = ip->i_dev;
	tp->i_din_uid = ip->i_uid;
	tp->i_number = ip->i_number;
	tp->size = size;

	bp = getnewbuf(0, 0, 0, 1);
	BUF_KERNPROC(bp);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = fsbtodoff(ip->i_fs, bno);
	bp->b_bcount = size;
	bp->b_bio1.bio_caller_info1.ptr = tp;
	bp->b_bio1.bio_done = ffs_blkfree_trim_completed;
	vn_strategy(ip->i_devvp, &bp->b_bio1);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(struct inode *ip, ufs_daddr_t bno, long size)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		kprintf("bsize = %ld, size = %ld, fs = %s\n",
		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((uint)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, dtog(fs, bno))),
		      (int)fs->fs_cgsize, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	blksfree = cg_blksfree(cgp);
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(blksfree, bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */

/*
 * Free an inode.
 */
int
ffs_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	return (ffs_freefile(pvp, ino, mode));
}
/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(struct vnode *pvp, ino_t ino, int mode)
{
	struct fs *fs;
	struct cg *cgp;
	struct inode *pip;
	struct buf *bp;
	int error, cg;
	uint8_t *inosused;

	pip = VTOI(pvp);
	fs = pip->i_fs;
	if ((uint)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_freefile: range: dev = (%d,%d), ino = %"PRId64", fs = %s",
		    major(pip->i_dev), minor(pip->i_dev), ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	if (isclr(inosused, ino)) {
		kprintf("dev = %s, ino = %lu, fs = %s\n",
		    devtoname(pip->i_dev), (u_long)ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_freefile: freeing free inode");
	}
	clrbit(inosused, ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, ufs_daddr_t bpref, int allocsiz)
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	uint8_t *blksfree;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	blksfree = cg_blksfree(cgp);
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((uint)len, (u_char *)&blksfree[start],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;	/* XXX why overlap here? */
		start = 0;
		loc = scanc((uint)len, (u_char *)&blksfree[0],
			    (u_char *)fragtbl[fs->fs_frag],
			    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			kprintf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_mapsearch: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, blksfree, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	kprintf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_mapsearch: block not in map");
	return (-1);
}
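
/*
 * Worked example of the around[]/inside[] window test in ffs_mapsearch()
 * above, assuming the classic FFS table values for allocsiz = 2
 * (around[2] = 0xf, inside[2] = 0x6).  A window of four bits matches when
 * the inner two fragments are free (1) and both neighbours are
 * allocated (0), i.e. a free run of exactly two fragments:
 *
 *	(blk & 0xf) == 0x6   ->   ...0110...  matches at this position
 *
 * field and subfield are shifted left once per iteration to slide the
 * window across the block, and the initial blk <<= 1 supplies a guard
 * zero for a run that starts at the edge of the block.
 */
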
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
static void
ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs_daddr_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(struct fs *fs, uint uid, char *cp)
{
	struct thread *td = curthread;
	struct proc *p;

	if ((p = td->td_proc) != NULL) {
		log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n",
		    p->p_pid, p->p_comm, uid, fs->fs_fsmnt, cp);
	} else {
		log(LOG_ERR, "system thread %p, uid %d on %s: %s\n",
		    td, uid, fs->fs_fsmnt, cp);
	}
}