/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_alloc.c	8.15 (Berkeley) 03/21/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
	    int));
static ino_t ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
	    __P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
	    int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
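
/*
 * Illustrative sketch (not part of the original source): the reserve
 * test above denies non-superuser allocations once free space drops
 * to fs_minfree percent of the data blocks. The standalone helper
 * below mimics that arithmetic with plain integers, as a rough
 * equivalent in spirit to freespace(fs, fs->fs_minfree) > 0; the
 * parameter names are hypothetical and it is never called.
 */
#ifdef notdef
static int
example_reserve_ok(nfrags_free, dsize_frags, minfree_pct)
	long nfrags_free;	/* free space, in fragments */
	long dsize_frags;	/* fs_dsize: data frags in the fs */
	int minfree_pct;	/* fs_minfree, typically 5 or 10 */
{

	/* allocation allowed while free space exceeds the reserve */
	return (nfrags_free - dsize_frags * minfree_pct / 100 > 0);
}
#endif /* notdef */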

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying.
		 * If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
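
/*
 * Illustrative sketch (not part of the original source): the two
 * switch arms above flip fs_optim based on how much of the minfree
 * reserve is tied up in fragments. With hypothetical numbers
 * fs_dsize = 100000 frags and fs_minfree = 10%, SPACE->TIME happens
 * once cs_nffree falls to at most 100000 * 10 / 200 = 5000 frags,
 * and TIME->SPACE happens once cs_nffree reaches
 * 100000 * (10 - 2) / 100 = 8000 frags. The helper below restates
 * the SPACE-to-TIME trigger; it is never called.
 */
#ifdef notdef
static int
example_want_opttime(nffree, dsize, minfree)
	long nffree;	/* fs_cstotal.cs_nffree */
	long dsize;	/* fs_dsize */
	int minfree;	/* fs_minfree */
{

	/* prefer FS_OPTTIME while fragmentation is low */
	return (minfree >= 5 && nffree <= dsize * minfree / (2 * 100));
}
#endif /* notdef */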

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. Whether the reallocation succeeded is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
#ifdef DEBUG
#include <sys/sysctl.h>
int doasyncfree = 1;
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
int prtrealloc = 0;
struct ctldebug debug15 = { "prtrealloc", &prtrealloc };
#else
#define doasyncfree 1
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-cluster");
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_int32_t (*)())ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize)
			bap = ebap;
#ifdef DIAGNOSTIC
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			VOP_UPDATE(vp, &time, &time, MNT_WAIT);
	}
	if (ssize < len)
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}
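
/*
 * Illustrative sketch (not part of the original source): the remap
 * loop above hands out the new cluster one block at a time, stepping
 * the physical address by fs_frag fragments per logical block. The
 * hypothetical helper below shows that stepping on a plain array;
 * it is never called.
 */
#ifdef notdef
static void
example_remap_run(bap, len, newblk, frag)
	ufs_daddr_t *bap;	/* block pointers to rewrite */
	int len;		/* number of logical blocks in the run */
	ufs_daddr_t newblk;	/* first fragment of the new cluster */
	int frag;		/* fs_frag: fragments per block */
{
	int i;

	/* consecutive logical blocks get frag-spaced physical blocks */
	for (i = 0; i < len; i++)
		bap[i] = newblk + i * frag;
}
#endif /* notdef */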

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
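
/*
 * Illustrative sketch (not part of the original source): with three
 * hypothetical cylinder groups holding (nifree, ndir) of (100, 12),
 * (40, 3) and (80, 7), the average free inode count is 220 / 3 = 73,
 * so cg 1 is skipped (below average) and cg 2 wins with the fewest
 * directories among the remaining candidates. The helper below runs
 * the same policy over plain arrays; it is never called.
 */
#ifdef notdef
static int
example_dirpref(nifree, ndir, ncg)
	int *nifree;	/* free inodes per cylinder group */
	int *ndir;	/* directories per cylinder group */
	int ncg;	/* number of cylinder groups */
{
	int cg, avg, mincg, minndir, total;

	for (total = 0, cg = 0; cg < ncg; cg++)
		total += nifree[cg];
	avg = total / ncg;
	mincg = 0;
	minndir = 0x7fffffff;
	for (cg = 0; cg < ncg; cg++)
		if (ndir[cg] < minndir && nifree[cg] >= avg) {
			mincg = cg;
			minndir = ndir[cg];
		}
	return (mincg);
}
#endif /* notdef */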

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		  ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
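
/*
 * Illustrative sketch (not part of the original source): a worked
 * instance of the ms-to-frags conversion above. With hypothetical
 * geometry rotdelay = 4ms, rps = 60 rev/sec, nsect = 32 sect/track
 * and 2 sectors per fragment, the delay covers
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), which
 * rounds up to one full 8-fragment block. The helper below repeats
 * that arithmetic with plain integers and a local round-up; it is
 * never called.
 */
#ifdef notdef
static long
example_rotdelay_frags(rotdelay, rps, nsect, nspf, frag)
	int rotdelay;	/* fs_rotdelay, in milliseconds */
	int rps;	/* fs_rps, revolutions per second */
	int nsect;	/* fs_nsect, sectors per track */
	int nspf;	/* NSPF(fs), sectors per fragment */
	int frag;	/* fs_frag, fragments per block */
{
	long frags;

	frags = (long)rotdelay * rps * nsect / (nspf * 1000);
	/* round up to the next whole block, as roundup() does */
	return ((frags + frag - 1) / frag * frag);
}
#endif /* notdef */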

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
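
/*
 * Illustrative sketch (not part of the original source): the rehash
 * above probes the starting group icg, then icg+1, icg+3, icg+7,
 * icg+15, ... (mod ncg), doubling the step each time, before the
 * brute force sweep takes over at icg+2. The hypothetical helper
 * below fills an array with that quadratic-rehash probe order so
 * the pattern can be seen in isolation; it is never called.
 */
#ifdef notdef
static int
example_rehash_order(icg, ncg, order, maxn)
	int icg;	/* starting cylinder group */
	int ncg;	/* fs_ncg: number of cylinder groups */
	int *order;	/* out: probe sequence */
	int maxn;	/* capacity of order[] */
{
	int i, cg, n;

	n = 0;
	cg = icg;
	if (n < maxn)
		order[n++] = cg;
	for (i = 1; i < ncg; i *= 2) {
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		if (n < maxn)
			order[n++] = cg;
	}
	return (n);
}
#endif /* notdef */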

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
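
/*
 * Illustrative sketch (not part of the original source): cg_frsum[n]
 * counts the free runs of exactly n fragments in the group, which is
 * what the allocsiz search above consults. Carving a 3-frag piece
 * out of a 6-frag run leaves a 3-frag remainder, so frsum[6] drops
 * by one and frsum[3] gains one. The hypothetical helper below
 * applies that update to a plain counter array; it is never called.
 */
#ifdef notdef
static void
example_frsum_carve(frsum, runsiz, want)
	int *frsum;	/* free-run counts indexed by run length */
	int runsiz;	/* length of the run being carved up */
	int want;	/* fragments actually allocated from it */
{

	frsum[runsiz]--;
	if (runsiz > want)
		frsum[runsiz - want]++;	/* the leftover becomes a new run */
}
#endif /* notdef */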

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
{
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. Panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got == cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	brelse(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
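
/*
 * Illustrative sketch (not part of the original source): the loop
 * above walks the cluster bitmap byte by byte counting consecutive
 * set bits. The hypothetical helper below does the same run scan on
 * a plain bitmap and returns the first block of a run of at least
 * len set bits, or -1 if no such run exists; it is never called.
 */
#ifdef notdef
static int
example_find_run(bmap, nblks, start, len)
	u_char *bmap;	/* free map, one bit per block, bit set = free */
	int nblks;	/* number of valid bits in the map */
	int start;	/* first block to consider */
	int len;	/* required run length */
{
	int i, run;

	for (run = 0, i = start; i < nblks; i++) {
		if ((bmap[i / NBBY] & (1 << (i % NBBY))) == 0) {
			run = 0;
			continue;
		}
		if (++run == len)
			return (i - run + 1);
	}
	return (-1);
}
#endif /* notdef */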

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
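
/*
 * Illustrative sketch (not part of the original source): the scan
 * above first skips fully-allocated 0xff bytes (skpc) and then walks
 * the bits of the first byte that has a hole. The hypothetical
 * helper below performs the equivalent search on a plain inode map,
 * returning the index of the first clear bit at or after the start
 * byte, or -1 if that part of the map is full (the caller would then
 * wrap around, as the code above does); it is never called.
 */
#ifdef notdef
static int
example_inode_scan(imap, nbytes, start)
	u_char *imap;	/* in-use map, bit set = inode allocated */
	int nbytes;	/* size of the map in bytes */
	int start;	/* byte index to start from (the rotor) */
{
	int i, bit;

	for (i = start; i < nbytes; i++) {
		if (imap[i] == 0xff)	/* what skpc(0xff, ...) skips */
			continue;
		for (bit = 0; bit < NBBY; bit++)
			if ((imap[i] & (1 << bit)) == 0)
				return (i * NBBY + bit);
	}
	return (-1);
}
#endif /* notdef */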

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
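
/*
 * Illustrative sketch (not part of the original source): the
 * reassembly test above asks whether, after the frags were returned,
 * every fragment of the enclosing block is now free. The
 * hypothetical helper below makes the same test against a plain
 * free-fragment bitmap, assuming the block is bit-aligned; it is
 * never called.
 */
#ifdef notdef
static int
example_block_reassembled(fmap, bbase, frag)
	u_char *fmap;	/* free map, one bit per fragment */
	int bbase;	/* first fragment of the block */
	int frag;	/* fs_frag: fragments per block */
{
	int i;

	for (i = 0; i < frag; i++)
		if ((fmap[(bbase + i) / NBBY] &
		    (1 << ((bbase + i) % NBBY))) == 0)
			return (0);
	return (1);
}
#endif /* notdef */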

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
	int allocsiz;
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}
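
/*
 * Illustrative sketch (not part of the original source): the
 * fragtbl/around/inside machinery above is a table-driven way of
 * matching a free run of exactly allocsiz fragments bounded by
 * in-use fragments (or a block boundary). The hypothetical helper
 * below settles for the simpler job of locating at least allocsiz
 * consecutive free bits within one block's worth of free bits,
 * returning the first fitting position or -1; it is never called.
 */
#ifdef notdef
static int
example_frag_fit(freebits, frag, allocsiz)
	int freebits;	/* low fs_frag bits: 1 = fragment free */
	int frag;	/* fs_frag: fragments per block */
	int allocsiz;	/* run of free fragments wanted */
{
	int pos, i;

	for (pos = 0; pos <= frag - allocsiz; pos++) {
		for (i = 0; i < allocsiz; i++)
			if ((freebits & (1 << (pos + i))) == 0)
				break;
		if (i == allocsiz)
			return (pos);
	}
	return (-1);
}
#endif /* notdef */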

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}
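
/*
 * Illustrative sketch (not part of the original source): a worked
 * instance of the summary update above. Freeing one block between a
 * free run of 2 (back) and a free run of 3 (forw) joins them into a
 * single 6-block cluster, so sump[6] gains one while sump[2] and
 * sump[3] each lose one. The hypothetical helper below applies that
 * bookkeeping to a plain counter array, with the run lengths already
 * clipped to the summary size by the caller; it is never called.
 */
#ifdef notdef
static void
example_clusterjoin(sump, back, forw, cnt)
	int32_t *sump;	/* cluster counts indexed by run length */
	int back;	/* free run length just below the block */
	int forw;	/* free run length just above the block */
	int cnt;	/* 1 == free, -1 == allocate */
{

	sump[back + forw + 1] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
}
#endif /* notdef */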