/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_alloc.c	7.33 (Berkeley) 05/04/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static daddr_t	ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
static ino_t	ffs_dirpref __P((struct fs *));
static daddr_t	ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		    __P((struct inode *, int, long, int, u_long (*)()));
static ino_t	ffs_ialloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	daddr_t *bnp;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IUPD|ICHG;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
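	 * (Added note: chkdq is called here with a negative count and the
	 * FORCE flag, which backs out the blocks provisionally charged by
	 * the chkdq call above.)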
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	daddr_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request, error;
	daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
ffs_valloc(pvp, mode, cred, vpp)
	register struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_ialloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, vpp);
	if (error) {
		ffs_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
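	 * (Added note: the generation number distinguishes successive uses
	 * of the same inode number, e.g. so that stale NFS file handles can
	 * be detected.)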
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
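 * (Added note: on success the starting block of the original fragment,
 * bprev, is returned; 0 is returned when the fragment cannot be extended
 * in place.)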
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on the fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	ffs_clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_ialloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_ialloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
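 * (Added note: if freeing these fragments leaves every fragment of the
 * containing block free, the fragment counts are rolled back into a
 * single free block below.)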
 */
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((unsigned)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (ffs_isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
void
ffs_vfree(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	struct buf *bp;
	int error, cg;

	pip = VTOI(pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = itog(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}