/*	$OpenBSD: ffs_alloc.c,v 1.38 2001/12/19 08:58:07 art Exp $	*/
/*	$NetBSD: ffs_alloc.c,v 1.11 1996/05/11 18:27:09 mycroft Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.11 (Berkeley) 10/27/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <dev/rndvar.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static daddr_t	ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_alloccgblk __P((struct inode *, struct buf *, daddr_t));
static daddr_t	ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
static ino_t	ffs_dirpref __P((struct inode *));
static daddr_t	ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc __P((struct inode *, int, long, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int)));
static daddr_t	ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));

#ifdef DIAGNOSTIC
static int	ffs_checkblk __P((struct inode *, daddr_t, long));
#endif

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified.
 * If a preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	daddr_t *bnp;
{
	register struct fs *fs;
	daddr_t bno;
	int cg;
	int error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;

	if ((error = ufs_quota_alloc_blocks(ip, btodb(size), cred)) != 0)
		return (error);

	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    ffs_alloccg);
	if (bno > 0) {
		ip->i_ffs_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}

	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_quota_free_blocks(ip, btodb(size), cred);

nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified.  The allocator attempts to extend
 * the original block.  Failing that, the regular block allocator is
 * invoked to get an appropriate block.
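 *
 * As an illustration (hypothetical sizes), on a file system with
 * fs_bsize = 8192 and fs_fsize = 1024, growing a file's last fragment
 * from osize = 2048 to nsize = 3072 first asks ffs_fragextend() for
 * the adjacent free 1024-byte fragment; only when that fragment is
 * busy is a new location allocated and the old one freed.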
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp, blknop)
	register struct inode *ip;
	daddr_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
	ufs_daddr_t *blknop;
{
	register struct fs *fs;
	struct buf *bp = NULL;
	ufs_daddr_t quota_updated = 0;
	int cg, request, error;
	daddr_t bprev, bno;

	if (bpp != NULL)
		*bpp = NULL;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_ffs_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (bpp != NULL &&
	    (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) != 0)
		goto error;

	if ((error = ufs_quota_alloc_blocks(ip, btodb(nsize - osize), cred))
	    != 0)
		goto error;

	quota_updated = btodb(nsize - osize);

	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if ((bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) != 0) {
		ip->i_ffs_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (bpp != NULL) {
			if (bp->b_blkno != fsbtodb(fs, bno))
				panic("ffs_realloccg: bad blockno");
			allocbuf(bp, nsize);
			bp->b_flags |= B_DONE;
			bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
			*bpp = bp;
		}
		if (blknop != NULL) {
			*blknop = bno;
		}
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment.  Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow.  If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment.  To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying.  If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
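		 *
		 * A worked example, assuming fs_minfree = 5: the SPACE
		 * case above flips to TIME once cs_nffree falls to
		 * fs_dsize * 5 / 200 (2.5% of the data area) or less,
		 * while this case flips back to SPACE once cs_nffree
		 * climbs to fs_dsize * (5 - 2) / 100 (3%).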
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    ffs_alloccg);
	if (bno <= 0)
		goto nospace;

	(void) uvm_vnp_uncache(ITOV(ip));
	if (!DOINGSOFTDEP(ITOV(ip)))
		ffs_blkfree(ip, bprev, (long)osize);
	if (nsize < request)
		ffs_blkfree(ip, bno + numfrags(fs, nsize),
		    (long)(request - nsize));
	ip->i_ffs_blocks += btodb(nsize - osize);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (bpp != NULL) {
		bp->b_blkno = fsbtodb(fs, bno);
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
	}
	if (blknop != NULL) {
		*blknop = bno;
	}
	return (0);

nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	error = ENOSPC;

error:
	if (bp != NULL) {
		brelse(bp);
		bp = NULL;
	}

	/*
	 * Restore user's disk quota because allocation failed.
	 */
	if (quota_updated != 0)
		(void)ufs_quota_free_blocks(ip, quota_updated, cred);

	return error;
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given.  The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range.  If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation.  If unsuccessful, the allocation
 * is left unchanged.  Whether the reallocation succeeded is returned.
 * Note that the error return is not reflected back to the user.  Rather
 * the previous block allocation will be used.
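 *
 * The doreallocblks flag below can be used to disable this
 * optimization entirely, and doasyncfree selects delayed rather than
 * synchronous writes of the updated inode and indirect blocks.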
 */

int doasyncfree = 1;
int doreallocblks = 1;
int prtrealloc = 0;

int
ffs_reallocblks(v)
	void *v;
{
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap = v;
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	daddr_t *bap, *sbap, *ebap = NULL;
	struct cluster_save *buflist;
	daddr_t start_lbn, end_lbn, soff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;

#ifdef DIAGNOSTIC
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		    dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");

	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");

	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_ffs_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_lvl > 1 &&
		    start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
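	 *
	 * Under soft updates the replacement is recorded as a
	 * dependency rather than freeing the old blocks right away;
	 * see the DOINGSOFTDEP cases below.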
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_ffs_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}

		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk.  In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written.  The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_ffs_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree) {
			UFS_UPDATE(ip, MNT_WAIT);
		}
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ip,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_ffs_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 * 1) allocate the preferred inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
int
ffs_inode_alloc(struct inode *pip, int mode, struct ucred *cred,
    struct vnode **vpp)
{
	struct vnode *pvp = ITOV(pip);
	struct fs *fs;
	struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);

	/*
	 * Track the number of directories created consecutively in
	 * the same cylinder group without intervening file allocations.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, vpp);
	if (error) {
		ffs_inode_free(pip, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_ffs_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_ffs_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_ffs_blocks) {	/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_ffs_blocks);
		ip->i_ffs_blocks = 0;
	}
	ip->i_ffs_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 * XXX - just increment for now, this is wrong! (millert)
	 *       Need a way to preserve randomization.
	 */
	if (ip->i_ffs_gen == 0 || ++(ip->i_ffs_gen) == 0)
		ip->i_ffs_gen = arc4random();
	if (ip->i_ffs_gen == 0 || ip->i_ffs_gen == -1)
		ip->i_ffs_gen = 1;	/* shouldn't happen */
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data.  Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	register struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	int avgifree, avgbfree, avgndir, curdirsize;
	int minifree, minbfree, maxndir;
	int mincg, minndir;
	int maxcontigdirs;

	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
#if 1

	/*
	 * Force allocation in another cg if creating a first level dir.
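	 * A parent vnode flagged VROOT is the file system root, so
	 * new top-level directories are scattered across all cylinder
	 * groups instead of piling up in the root's group.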
	 */
	if (ITOV(pip)->v_flag & VROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		cg = mincg;
		goto end;
	} else
		prefcg = ino_to_cg(fs, pip->i_number);
#else
	prefcg = ino_to_cg(fs, pip->i_number);
#endif

	/*
	 * Compute the various limits used for the optimal allocation
	 * of a directory inode.
	 */
#if 1
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - fs->fs_ipg / 4;
	if (minifree < 0)
		minifree = 0;
	minbfree = avgbfree - fs->fs_fpg / fs->fs_frag / 4;
	if (minbfree < 0)
		minbfree = 0;
#else
	maxndir = avgndir + (fs->fs_ipg - avgndir) / 16;
	minifree = avgifree * 3 / 4;
	minbfree = avgbfree * 3 / 4;
#endif
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = min(cgsize / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				goto end;
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				goto end;
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			goto end;
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			goto end;
end:
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.
 * The sweep continues until a cylinder group with greater than the
 * average number of free blocks is found.  If the allocation is for the
 * first block in an indirect block, the information on the previous
 * allocation is unavailable; here a best guess is made based upon the
 * logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with a greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out.  If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
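 *
 * For example, starting from cylinder group cg, the quadratic rehash
 * probes cg+1, cg+3, cg+7, cg+15, ... (mod fs_ncg), and the brute
 * force pass then scans linearly from cg+2 through the remaining
 * groups.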
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	daddr_t (*allocator) __P((struct inode *, int, daddr_t, int));
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);

	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
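 *
 * For example, a 2048-byte request on a file system with 1024-byte
 * fragments needs a run of two contiguous fragments: cg_frsum[] is
 * scanned upward from that size for the smallest available run, and
 * only if no such run exists is a full block split up.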
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, i, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}

	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;

	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
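 *
 * When the rotational layout tables are unusable (fs_cpc == 0 or
 * fs_nrpos <= 1, as on drives of unknown geometry), steps 2) and 3)
 * collapse into taking the next free block after the cg_rotor; see
 * the norot: path below.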
 */
static daddr_t
ffs_alloccgblk(ip, bp, bpref)
	struct inode *ip;
	struct buf *bp;
	daddr_t bpref;
{
	struct fs *fs;
	struct cg *cgp;
	daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_cpc == 0 || fs->fs_nrpos <= 1) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated.  Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group.  Instead we just
 * take the first one that we find following bpref.
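 *
 * For example, a request for len = 4 blocks first consults
 * cg_clustersum(cgp)[4] through [fs_contigsumsize]; if all of those
 * counts are zero the group cannot satisfy the request, and
 * fs_maxcluster[cg] is lowered so later requests skip this group
 * without rereading its map.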
 */
static daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group.  Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation.  We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read).  We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
#ifdef DIAGNOSTIC
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
#endif
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
#ifdef DIAGNOSTIC
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
#endif

	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(ip, bp, bno + i) != bno + i)
			panic("ffs_clusteralloc: lost block");
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
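 *
 * The forward search keys off cg_irotor: skpc() skips map bytes that
 * are entirely allocated (0xff), then the bits of the first byte
 * containing a free inode are tested individually to locate it.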
 */
static daddr_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);

	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
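 *
 * For example, when the fragment being freed is the last busy piece
 * of a block whose other fragments are already free, the fragment
 * counts are rolled back and the whole block is credited to
 * cs_nbfree instead (the reassembly case below).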
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		printf("dev = 0x%x, bsize = %d, size = %ld, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_ffs_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

int
ffs_inode_free(struct inode *pip, ino_t ino, int mode)
{
	struct vnode *pvp = ITOV(pip);

	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}

	return (ffs_freefile(pip, ino, mode));
}

/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(struct inode *pip, ino_t ino, int mode)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_freefile: range: dev = 0x%x, ino = %d, fs = %s",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_freefile: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment.  Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %d, size = %ld, fs = %s\n",
		    fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		/* XXX - probably should panic here */
		brelse(bp);
		return (-1);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		/* XXX - probably should panic here */
		brelse(bp);
		return (-1);
	}
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(cg_blksfree(cgp), bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */


/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
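 *
 * The search tests a whole byte of the free map at a time: scanc()
 * matches each byte against fragtbl[fs_frag] to detect a run of at
 * least allocsiz free fragments, starting at cg_frotor and wrapping
 * once around the group.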
 */
static daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
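	 *
	 * For example, freeing (cnt == 1) a block with back == 2 free
	 * blocks behind it and forw == 3 ahead of it merges two runs
	 * into one of length 2 + 3 + 1 = 6: sump[6] (capped at
	 * fs_contigsumsize) is incremented while sump[2] and sump[3]
	 * are decremented.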
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}