1 /* ffs_alloc.c 2.4 82/04/19 */ 2 3 #include "../h/param.h" 4 #include "../h/systm.h" 5 #include "../h/mount.h" 6 #include "../h/fs.h" 7 #include "../h/conf.h" 8 #include "../h/buf.h" 9 #include "../h/inode.h" 10 #include "../h/dir.h" 11 #include "../h/user.h" 12 13 extern u_long hashalloc(); 14 extern ino_t ialloccg(); 15 extern daddr_t alloccg(); 16 extern daddr_t alloccgblk(); 17 extern daddr_t fragextend(); 18 extern daddr_t blkpref(); 19 extern daddr_t mapsearch(); 20 extern int inside[], around[]; 21 extern unsigned char *fragtbl[]; 22 23 /* 24 * Allocate a block in the file system. 25 * 26 * The size of the requested block is given, which must be some 27 * multiple of fs_fsize and <= fs_bsize. 28 * A preference may be optionally specified. If a preference is given 29 * the following hierarchy is used to allocate a block: 30 * 1) allocate the requested block. 31 * 2) allocate a rotationally optimal block in the same cylinder. 32 * 3) allocate a block in the same cylinder group. 33 * 4) quadradically rehash into other cylinder groups, until an 34 * available block is located. 35 * If no block preference is given the following heirarchy is used 36 * to allocate a block: 37 * 1) allocate a block in the cylinder group that contains the 38 * inode for the file. 39 * 2) quadradically rehash into other cylinder groups, until an 40 * available block is located. 
 */
struct buf *
alloc(ip, bpref, size)
	register struct inode *ip;	/* inode of the file needing the block */
	daddr_t bpref;			/* preferred block, 0 for no preference */
	int size;			/* bytes wanted: multiple of fs_fsize, <= fs_bsize */
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	/* reject sizes that are not an integral number of fragments */
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0)
		panic("alloc: bad size");
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	/*
	 * non-root callers may not dip into the minfree reserve;
	 * total free space is counted here in fragments
	 */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	/* with no preference, start in the cylinder group holding the inode */
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size, alloccg);
	if (bno <= 0)
		goto nospace;
	/* hand back a zeroed buffer for the newly allocated block */
	bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
struct buf *
realloccg(ip, bprev, bpref, osize, nsize)
	register struct inode *ip;	/* inode owning the fragment */
	daddr_t bprev, bpref;		/* current fragment; preferred new location */
	int osize, nsize;		/* old and new sizes, in bytes */
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0)
		panic("realloccg: bad size");
	/* enforce the minfree reserve for non-root callers */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev == 0)
		panic("realloccg: bad bprev");
	/*
	 * first try to extend the fragment in place in its own
	 * cylinder group
	 */
	cg = dtog(fs, bprev);
	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (NULL);
		}
		brealloc(bp, nsize);
		/* zero the newly grown tail of the buffer */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	/*
	 * extension failed: allocate a new fragment elsewhere, copy
	 * the old contents over, then free the old fragment
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize, alloccg);
	if (bno > 0) {
		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
		if (obp->b_flags & B_ERROR) {
			brelse(obp);
			return (NULL);
		}
		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		brelse(obp);
		fre(ip, bprev, (off_t)osize);
		return (bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 *   1) allocate the requested inode.
 *   2) allocate an inode in the same cylinder group.
155 * 3) quadradically rehash into other cylinder groups, until an 156 * available inode is located. 157 * If no inode preference is given the following heirarchy is used 158 * to allocate an inode: 159 * 1) allocate an inode in cylinder group 0. 160 * 2) quadradically rehash into other cylinder groups, until an 161 * available inode is located. 162 */ 163 struct inode * 164 ialloc(pip, ipref, mode) 165 register struct inode *pip; 166 ino_t ipref; 167 int mode; 168 { 169 ino_t ino; 170 register struct fs *fs; 171 register struct inode *ip; 172 int cg; 173 174 fs = pip->i_fs; 175 if (fs->fs_cstotal.cs_nifree == 0) 176 goto noinodes; 177 if (ipref >= fs->fs_ncg * fs->fs_ipg) 178 ipref = 0; 179 cg = itog(fs, ipref); 180 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg); 181 if (ino == 0) 182 goto noinodes; 183 ip = iget(pip->i_dev, pip->i_fs, ino); 184 if (ip == NULL) { 185 ifree(ip, ino, 0); 186 return (NULL); 187 } 188 if (ip->i_mode) 189 panic("ialloc: dup alloc"); 190 return (ip); 191 noinodes: 192 fserr(fs, "out of inodes"); 193 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt); 194 u.u_error = ENOSPC; 195 return (NULL); 196 } 197 198 /* 199 * Find a cylinder to place a directory. 200 * 201 * The policy implemented by this algorithm is to select from 202 * among those cylinder groups with above the average number of 203 * free inodes, the one with the smallest number of directories. 204 */ 205 dirpref(fs) 206 register struct fs *fs; 207 { 208 int cg, minndir, mincg, avgifree; 209 210 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 211 minndir = fs->fs_ipg; 212 mincg = 0; 213 for (cg = 0; cg < fs->fs_ncg; cg++) 214 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 215 fs->fs_cs(fs, cg).cs_nifree >= avgifree) { 216 mincg = cg; 217 minndir = fs->fs_cs(fs, cg).cs_ndir; 218 } 219 return (fs->fs_ipg * mincg); 220 } 221 222 /* 223 * Select a cylinder to place a large block of data. 
224 * 225 * The policy implemented by this algorithm is to maintain a 226 * rotor that sweeps the cylinder groups. When a block is 227 * needed, the rotor is advanced until a cylinder group with 228 * greater than the average number of free blocks is found. 229 */ 230 daddr_t 231 blkpref(fs) 232 register struct fs *fs; 233 { 234 int cg, avgbfree; 235 236 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 237 for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++) 238 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 239 fs->fs_cgrotor = cg; 240 return (fs->fs_fpg * cg + fs->fs_frag); 241 } 242 for (cg = 0; cg <= fs->fs_cgrotor; cg++) 243 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 244 fs->fs_cgrotor = cg; 245 return (fs->fs_fpg * cg + fs->fs_frag); 246 } 247 return (NULL); 248 } 249 250 /* 251 * Implement the cylinder overflow algorithm. 252 * 253 * The policy implemented by this algorithm is: 254 * 1) allocate the block in its requested cylinder group. 255 * 2) quadradically rehash on the cylinder group number. 256 * 3) brute force search for a free block. 
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;			/* preferred cylinder group */
	long pref;		/* preferred slot within the group, 0 for none */
	int size;		/* size for data blocks, mode for inodes */
	u_long (*allocator)();	/* per-cylinder-group allocation routine */
{
	register struct fs *fs;
	long result;		/* slot allocated, or 0 on failure */
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * The rehash can skip groups, so as a last resort every
	 * cylinder group is tried in turn, starting from the
	 * originally requested one.
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;			/* cylinder group containing bprev */
	long bprev;		/* current fragment address */
	int osize, nsize;	/* old and new sizes, in bytes */
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	fs = ip->i_fs;
	/* quick check against the summary before reading the cg block */
	if (fs->fs_cs(fs, cg).cs_nffree < nsize - osize)
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragoff(fs, bprev);
	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	bno = dtogd(fs, bprev);
	/* every fragment beyond the old piece must currently be free */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cgp->cg_free, bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	/* measure the free run following the old piece within this block */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cgp->cg_free, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cgp->cg_free, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
373 */ 374 daddr_t 375 alloccg(ip, cg, bpref, size) 376 struct inode *ip; 377 int cg; 378 daddr_t bpref; 379 int size; 380 { 381 register struct fs *fs; 382 register struct buf *bp; 383 register struct cg *cgp; 384 int bno, frags; 385 int allocsiz; 386 register int i; 387 388 fs = ip->i_fs; 389 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 390 return (NULL); 391 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); 392 cgp = bp->b_un.b_cg; 393 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 394 brelse(bp); 395 return (NULL); 396 } 397 if (size == fs->fs_bsize) { 398 bno = alloccgblk(fs, cgp, bpref); 399 bdwrite(bp); 400 return (bno); 401 } 402 /* 403 * check to see if any fragments are already available 404 * allocsiz is the size which will be allocated, hacking 405 * it down to a smaller size if necessary 406 */ 407 frags = numfrags(fs, size); 408 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) 409 if (cgp->cg_frsum[allocsiz] != 0) 410 break; 411 if (allocsiz == fs->fs_frag) { 412 /* 413 * no fragments were available, so a block will be 414 * allocated, and hacked up 415 */ 416 if (cgp->cg_cs.cs_nbfree == 0) { 417 brelse(bp); 418 return (NULL); 419 } 420 bno = alloccgblk(fs, cgp, bpref); 421 bpref = dtogd(fs, bno); 422 for (i = frags; i < fs->fs_frag; i++) 423 setbit(cgp->cg_free, bpref + i); 424 i = fs->fs_frag - frags; 425 cgp->cg_cs.cs_nffree += i; 426 fs->fs_cstotal.cs_nffree += i; 427 fs->fs_cs(fs, cg).cs_nffree += i; 428 cgp->cg_frsum[i]++; 429 bdwrite(bp); 430 return (bno); 431 } 432 bno = mapsearch(fs, cgp, bpref, allocsiz); 433 if (bno < 0) 434 return (NULL); 435 for (i = 0; i < frags; i++) 436 clrbit(cgp->cg_free, bno + i); 437 cgp->cg_cs.cs_nffree -= frags; 438 fs->fs_cstotal.cs_nffree -= frags; 439 fs->fs_cs(fs, cg).cs_nffree -= frags; 440 cgp->cg_frsum[allocsiz]--; 441 if (frags != allocsiz) 442 cgp->cg_frsum[allocsiz - frags]++; 443 bdwrite(bp); 444 return (cg * fs->fs_fpg + bno); 445 } 446 447 /* 448 * 
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;	/* in-core cylinder group block */
	daddr_t bpref;			/* preferred block, 0 for none */
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	/* round the preference down to a block boundary within the group */
	bpref &= ~(fs->fs_frag - 1);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cgp->cg_btot[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * find a block that is rotationally optimal
	 */
	cylbp = cgp->cg_b[cylno];
	if (fs->fs_rotdelay == 0) {
		pos = cbtorpos(fs, bpref);
	} else {
		/*
		 * here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next rotational position
		 */
		bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000);
		pos = cbtorpos(fs, bpref);
		pos = (pos + 1) % NRPOS;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * optimal rotational position and proceeding around.
	 */
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1)
			panic("alloccgblk: cyl groups corrupted");
		/* chase the rotational block list looking for a free block */
		for (i = fs->fs_postbl[pos][i];; ) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			delta = fs->fs_rotbl[i];
			if (delta <= 0 || delta > MAXBPC - i)
				break;
			i += delta;
		}
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* claim the block and adjust all the free counts */
	clrblock(fs, cgp->cg_free, bno/fs->fs_frag);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
	cgp->cg_btot[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;		/* cylinder group to allocate in */
	daddr_t ipref;	/* preferred inode, 0 for none */
	int mode;	/* mode of the new inode; IFDIR bumps ndir counts */
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	/* scan forward (with wraparound) for a free inode */
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (NULL);
gotit:
	/* mark the inode in use and adjust all the free counts */
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
fre(ip, bno, size)
	register struct inode *ip;	/* inode the block belonged to */
	daddr_t bno;			/* first fragment to free */
	off_t size;			/* bytes to free: multiple of fs_fsize, <= fs_bsize */
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0)
		panic("free: bad size");
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/* freeing a whole block */
		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag)) {
			printf("free block %d, fs %s\n", bno, fs->fs_fsmnt);
			panic("free: freeing free block");
		}
		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cgp->cg_b[i][cbtorpos(fs, bno)]++;
		cgp->cg_btot[i]++;
	} else {
		bbase = bno - (bno % fs->fs_frag);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i))
				panic("free: freeing free frag");
			setbit(cgp->cg_free, bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
			cgp->cg_btot[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;	/* any inode on the same file system */
	ino_t ino;		/* inode number to free */
	int mode;		/* mode of the freed inode; IFDIR drops ndir counts */
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
		panic("ifree: range");
	cg = itog(fs, ino);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino))
		panic("ifree: freeing free inode");
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;	/* in-core cylinder group block */
	daddr_t bpref;			/* preferred location, 0 for the rotor */
	int allocsiz;			/* run length wanted, in fragments */
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
	if (loc == 0) {
		/* not found past the rotor: rescan from the start of the map */
		len = start + 1;
		start = 0;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (-1);
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cgp->cg_free, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Update the frsum fields to reflect addition or deletion
 * of some frags.
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;		/* map of the fragments in one block (from blkmap) */
	long fraglist[];	/* frsum counters, indexed by run length */
	int cnt;		/* +1 to add the runs, -1 to delete them */
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/* inblk gets a bit set for each free-fragment run size present */
	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		/* slide the pattern across the map, counting runs of this size */
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

/*
 * Check that a specified block number is in range.
 */
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size) {
		printf("bad block %d, ", bn);
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * Getfs maps a device number into a pointer to the incore super block.
 *
 * The algorithm is a linear search through the mount table. A
 * consistency check of the super block magic number is performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) {
		if (mp->m_bufp == NULL || mp->m_dev != dev)
			continue;
		fs = mp->m_bufp->b_un.b_fs;
		if (fs->fs_magic != FS_MAGIC)
			panic("getfs: bad magic");
		return (fs);
	}
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;	/* error message to print */
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}

/*
 * Getfsx returns the index in the file system
 * table of the specified device. The swap device
 * is also assigned a pseudo-index. The index may
 * be used as a compressed indication of the location
 * of a block, recording
 *	<getfsx(dev),blkno>
 * rather than
 *	<dev, blkno>
 * provided the information need remain valid only
 * as long as the file system is mounted.
 */
getfsx(dev)
	dev_t dev;
{
	register struct mount *mp;

	if (dev == swapdev)
		return (MSWAPX);
	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_dev == dev)
			return (mp - &mount[0]);
	return (-1);
}

/*
 * Update is the internal name of 'sync'. It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate
 * the writing of the modified super blocks.
 */
update(flag)
	int flag;
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	int i, blks;

	/* updlock serializes concurrent syncs; skip if one is in progress */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) {
		if (mp->m_bufp == NULL)
			continue;
		fs = mp->m_bufp->b_un.b_fs;
		if (fs->fs_fmod == 0)
			continue;
		if (fs->fs_ronly != 0)
			panic("update: rofs mod");
		bp = getblk(mp->m_dev, SBLOCK, SBSIZE);
		if (bp->b_un.b_fs != fs || fs->fs_magic != FS_MAGIC)
			panic("update: bad b_fs");
		fs->fs_fmod = 0;
		fs->fs_time = time;
		bwrite(bp);
		/* write back the cylinder group summary area as well */
		blks = howmany(fs->fs_cssize, fs->fs_fsize);
		for (i = 0; i < blks; i += fs->fs_frag) {
			bp = getblk(mp->m_dev,
			    fsbtodb(fs, fs->fs_csaddr + i),
			    blks - i < fs->fs_frag ?
			    (blks - i) * fs->fs_fsize :
			    fs->fs_bsize);
			bwrite(bp);
		}
	}
	/*
	 * Write back each (modified) inode.
	 */
	for (ip = inode; ip < inodeNINODE; ip++) {
		if ((ip->i_flag & ILOCK) != 0 || ip->i_count == 0)
			continue;
		ip->i_flag |= ILOCK;
		ip->i_count++;
		iupdat(ip, &time, &time, 0);
		iput(ip);
	}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}

/*
 * block operations
 *
 * check if a block is available
 */
isblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;	/* free map */
	int h;			/* block number within the map */
{
	unsigned char mask;

	switch (fs->fs_frag) {
	case 8:
		return (cp[h] == 0xff);
	case 4:
		mask = 0x0f << ((h & 0x1) << 2);
		return ((cp[h >> 1] & mask) == mask);
	case 2:
		mask = 0x03 << ((h & 0x3) << 1);
		return ((cp[h >> 2] & mask) == mask);
	case 1:
		mask = 0x01 << (h & 0x7);
		return ((cp[h >> 3] & mask) == mask);
	default:
		panic("isblock");
		return (NULL);
	}
}

/*
 * take a block out of the map
 */
clrblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;	/* free map */
	int h;			/* block number within the map */
{
	switch ((fs)->fs_frag) {
	case 8:
		cp[h] = 0;
		return;
	case 4:
		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] &= ~(0x01 << (h & 0x7));
		return;
	default:
		panic("clrblock");
		return;
	}
}

/*
 * put a block into the map
 */
setblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;	/* free map */
	int h;			/* block number within the map */
{
	switch (fs->fs_frag) {
	case 8:
		cp[h] = 0xff;
		return;
	case 4:
		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] |= (0x01 << (h & 0x7));
		return;
	default:
		panic("setblock");
		return;
	}
}