1 /* Copyright (c) 1981 Regents of the University of California */ 2 3 static char vers[] = "@(#)lfs_alloc.c 1.19 02/26/82"; 4 5 /* alloc.c 4.8 81/03/08 */ 6 7 #include "../h/param.h" 8 #include "../h/systm.h" 9 #include "../h/mount.h" 10 #include "../h/fs.h" 11 #include "../h/conf.h" 12 #include "../h/buf.h" 13 #include "../h/inode.h" 14 #include "../h/user.h" 15 16 extern u_long hashalloc(); 17 extern ino_t ialloccg(); 18 extern daddr_t alloccg(); 19 extern daddr_t alloccgblk(); 20 extern daddr_t fragextend(); 21 extern daddr_t blkpref(); 22 extern daddr_t mapsearch(); 23 extern int inside[], around[]; 24 extern unsigned char *fragtbl[]; 25 26 /* 27 * Allocate a block in the file system. 28 * 29 * The size of the requested block is given, which must be some 30 * multiple of fs_fsize and <= fs_bsize. 31 * A preference may be optionally specified. If a preference is given 32 * the following hierarchy is used to allocate a block: 33 * 1) allocate the requested block. 34 * 2) allocate a rotationally optimal block in the same cylinder. 35 * 3) allocate a block in the same cylinder group. 36 * 4) quadradically rehash into other cylinder groups, until an 37 * available block is located. 38 * If no block preference is given the following heirarchy is used 39 * to allocate a block: 40 * 1) allocate a block in the cylinder group that contains the 41 * inode for the file. 42 * 2) quadradically rehash into other cylinder groups, until an 43 * available block is located. 
44 */ 45 struct buf * 46 alloc(ip, bpref, size) 47 register struct inode *ip; 48 daddr_t bpref; 49 int size; 50 { 51 daddr_t bno; 52 register struct fs *fs; 53 register struct buf *bp; 54 int cg; 55 56 fs = ip->i_fs; 57 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) 58 panic("alloc: bad size"); 59 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 60 goto nospace; 61 if (u.u_uid != 0 && 62 fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree < 63 fs->fs_dsize * fs->fs_minfree / 100) 64 goto nospace; 65 if (bpref >= fs->fs_size) 66 bpref = 0; 67 if (bpref == 0) 68 cg = itog(fs, ip->i_number); 69 else 70 cg = dtog(fs, bpref); 71 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size, alloccg); 72 if (bno == 0) 73 goto nospace; 74 bp = getblk(ip->i_dev, fsbtodb(fs, bno), size); 75 clrbuf(bp); 76 return (bp); 77 nospace: 78 fserr(fs, "file system full"); 79 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 80 u.u_error = ENOSPC; 81 return (NULL); 82 } 83 84 /* 85 * Reallocate a fragment to a bigger size 86 * 87 * The number and size of the old block is given, and a preference 88 * and new size is also specified. The allocator attempts to extend 89 * the original block. Failing that, the regular block allocator is 90 * invoked to get an appropriate block. 
 */
struct buf *
realloccg(ip, bprev, bpref, osize, nsize)
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	/*
	 * Returns a buffer of nsize bytes for the enlarged fragment,
	 * or NULL (with u.u_error set to ENOSPC) if no space is found.
	 */
	fs = ip->i_fs;
	/* both sizes must be fragment multiples no larger than a block */
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0)
		panic("realloccg: bad size");
	/* non-root callers may not dip into the minfree reserve */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev != 0)
		cg = dtog(fs, bprev);
	else
		panic("realloccg: bad bprev");
	/*
	 * Cheap case first: try to grow the fragment in place.
	 */
	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return 0;
		}
		/* grow the buffer and zero the newly added bytes */
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 */
		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
		if (obp->b_flags & B_ERROR) {
			brelse(obp);
			return 0;
		}
		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
		/*
		 * Swap the data pointers of the old and new buffers
		 * instead of copying the bytes, then invalidate the
		 * old buffer and release the old fragment.
		 */
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(ip, bprev, (off_t)osize);
		/* zero the bytes beyond the old size */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified.
If a preference is given 163 * the following hierarchy is used to allocate an inode: 164 * 1) allocate the requested inode. 165 * 2) allocate an inode in the same cylinder group. 166 * 3) quadradically rehash into other cylinder groups, until an 167 * available inode is located. 168 * If no inode preference is given the following heirarchy is used 169 * to allocate an inode: 170 * 1) allocate an inode in cylinder group 0. 171 * 2) quadradically rehash into other cylinder groups, until an 172 * available inode is located. 173 */ 174 struct inode * 175 ialloc(pip, ipref, mode) 176 register struct inode *pip; 177 ino_t ipref; 178 int mode; 179 { 180 ino_t ino; 181 register struct fs *fs; 182 register struct inode *ip; 183 int cg; 184 185 fs = pip->i_fs; 186 if (fs->fs_cstotal.cs_nifree == 0) 187 goto noinodes; 188 if (ipref >= fs->fs_ncg * fs->fs_ipg) 189 ipref = 0; 190 cg = itog(fs, ipref); 191 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg); 192 if (ino == 0) 193 goto noinodes; 194 ip = iget(pip->i_dev, pip->i_fs, ino); 195 if (ip == NULL) { 196 ifree(ip, ino, 0); 197 return (NULL); 198 } 199 if (ip->i_mode) 200 panic("ialloc: dup alloc"); 201 return (ip); 202 noinodes: 203 fserr(fs, "out of inodes"); 204 uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt); 205 u.u_error = ENOSPC; 206 return (NULL); 207 } 208 209 /* 210 * Find a cylinder to place a directory. 211 * 212 * The policy implemented by this algorithm is to select from 213 * among those cylinder groups with above the average number of 214 * free inodes, the one with the smallest number of directories. 
215 */ 216 dirpref(fs) 217 register struct fs *fs; 218 { 219 int cg, minndir, mincg, avgifree; 220 221 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 222 minndir = fs->fs_ipg; 223 mincg = 0; 224 for (cg = 0; cg < fs->fs_ncg; cg++) 225 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 226 fs->fs_cs(fs, cg).cs_nifree >= avgifree) { 227 mincg = cg; 228 minndir = fs->fs_cs(fs, cg).cs_ndir; 229 } 230 return (fs->fs_ipg * mincg); 231 } 232 233 /* 234 * Select a cylinder to place a large block of data. 235 * 236 * The policy implemented by this algorithm is to maintain a 237 * rotor that sweeps the cylinder groups. When a block is 238 * needed, the rotor is advanced until a cylinder group with 239 * greater than the average number of free blocks is found. 240 */ 241 daddr_t 242 blkpref(fs) 243 register struct fs *fs; 244 { 245 int cg, avgbfree; 246 247 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 248 for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++) 249 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 250 fs->fs_cgrotor = cg; 251 return (fs->fs_fpg * cg + fs->fs_frag); 252 } 253 for (cg = 0; cg <= fs->fs_cgrotor; cg++) 254 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 255 fs->fs_cgrotor = cg; 256 return (fs->fs_fpg * cg + fs->fs_frag); 257 } 258 return (0); 259 } 260 261 /* 262 * Implement the cylinder overflow algorithm. 263 * 264 * The policy implemented by this algorithm is: 265 * 1) allocate the block in its requested cylinder group. 266 * 2) quadradically rehash on the cylinder group number. 267 * 3) brute force search for a free block. 
268 */ 269 /*VARARGS5*/ 270 u_long 271 hashalloc(ip, cg, pref, size, allocator) 272 struct inode *ip; 273 int cg; 274 long pref; 275 int size; /* size for data blocks, mode for inodes */ 276 u_long (*allocator)(); 277 { 278 register struct fs *fs; 279 long result; 280 int i, icg = cg; 281 282 fs = ip->i_fs; 283 /* 284 * 1: preferred cylinder group 285 */ 286 result = (*allocator)(ip, cg, pref, size); 287 if (result) 288 return (result); 289 /* 290 * 2: quadratic rehash 291 */ 292 for (i = 1; i < fs->fs_ncg; i *= 2) { 293 cg += i; 294 if (cg >= fs->fs_ncg) 295 cg -= fs->fs_ncg; 296 result = (*allocator)(ip, cg, 0, size); 297 if (result) 298 return (result); 299 } 300 /* 301 * 3: brute force search 302 */ 303 cg = icg; 304 for (i = 0; i < fs->fs_ncg; i++) { 305 result = (*allocator)(ip, cg, 0, size); 306 if (result) 307 return (result); 308 cg++; 309 if (cg == fs->fs_ncg) 310 cg = 0; 311 } 312 return (0); 313 } 314 315 /* 316 * Determine whether a fragment can be extended. 317 * 318 * Check to see if the necessary fragments are available, and 319 * if they are, allocate them. 
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	/*
	 * Try to grow fragment bprev from osize to nsize in place.
	 * Returns bprev on success, 0 if the needed frags are not free.
	 */
	fs = ip->i_fs;
	frags = numfrags(fs, nsize);	/* frags needed for the new size */
	bbase = fragoff(fs, bprev);	/* offset of bprev within its block */
	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
		/* cannot extend across a block boundry */
		return (0);
	}
	/* read the cylinder group map for this group */
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return 0;
	}
	cgp = bp->b_un.b_cg;
	bno = dtogd(fs, bprev);		/* frag number within the group */
	/*
	 * Every additional fragment must currently be free (a set bit
	 * in cg_free means free; a clear bit means in use).
	 */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cgp->cg_free, bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	/* measure the free run we are eating into (stops at block end) */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cgp->cg_free, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	/* mark the newly consumed frags in use and fix the free counts */
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cgp->cg_free, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the apprpriate size is available,
 * and if it is, allocate it.
382 */ 383 daddr_t 384 alloccg(ip, cg, bpref, size) 385 struct inode *ip; 386 int cg; 387 daddr_t bpref; 388 int size; 389 { 390 register struct fs *fs; 391 register struct buf *bp; 392 register struct cg *cgp; 393 int bno, frags; 394 int allocsiz; 395 register int i; 396 397 fs = ip->i_fs; 398 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 399 return (0); 400 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize); 401 if (bp->b_flags & B_ERROR) { 402 brelse(bp); 403 return 0; 404 } 405 cgp = bp->b_un.b_cg; 406 if (size == fs->fs_bsize) { 407 bno = alloccgblk(fs, cgp, bpref); 408 bdwrite(bp); 409 return (bno); 410 } 411 /* 412 * check to see if any fragments are already available 413 * allocsiz is the size which will be allocated, hacking 414 * it down to a smaller size if necessary 415 */ 416 frags = numfrags(fs, size); 417 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) 418 if (cgp->cg_frsum[allocsiz] != 0) 419 break; 420 if (allocsiz == fs->fs_frag) { 421 /* 422 * no fragments were available, so a block will be 423 * allocated, and hacked up 424 */ 425 if (cgp->cg_cs.cs_nbfree == 0) { 426 brelse(bp); 427 return (0); 428 } 429 bno = alloccgblk(fs, cgp, bpref); 430 bpref = dtogd(fs, bno); 431 for (i = frags; i < fs->fs_frag; i++) 432 setbit(cgp->cg_free, bpref + i); 433 i = fs->fs_frag - frags; 434 cgp->cg_cs.cs_nffree += i; 435 fs->fs_cstotal.cs_nffree += i; 436 fs->fs_cs(fs, cg).cs_nffree += i; 437 cgp->cg_frsum[i]++; 438 bdwrite(bp); 439 return (bno); 440 } 441 bno = mapsearch(fs, cgp, bpref, allocsiz); 442 if (bno == 0) 443 return (0); 444 for (i = 0; i < frags; i++) 445 clrbit(cgp->cg_free, bno + i); 446 cgp->cg_cs.cs_nffree -= frags; 447 fs->fs_cstotal.cs_nffree -= frags; 448 fs->fs_cs(fs, cg).cs_nffree -= frags; 449 cgp->cg_frsum[allocsiz]--; 450 if (frags != allocsiz) 451 cgp->cg_frsum[allocsiz - frags]++; 452 bdwrite(bp); 453 return (cg * fs->fs_fpg + bno); 454 } 455 456 /* 457 * Allocate a block in a cylinder group. 
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		/* no preference: continue from the per-group rotor */
		bpref = cgp->cg_rotor;
		goto norot;
	}
	/* round the preference down to a block boundary, group-relative */
	bpref &= ~(fs->fs_frag - 1);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cgp->cg_btot[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * find a block that is rotationally optimal
	 */
	cylbp = cgp->cg_b[cylno];
	if (fs->fs_rotdelay == 0) {
		pos = cbtorpos(fs, bpref);
	} else {
		/*
		 * here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next rotational position
		 */
		bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000);
		pos = cbtorpos(fs, bpref);
		pos = (pos + 1) % NRPOS;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * optimal rotational position and proceeding around.
	 */
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1)
			panic("alloccgblk: cyl groups corrupted");
		/*
		 * Walk the rotational-position chain (fs_rotbl links
		 * blocks at the same position; 0 terminates the chain)
		 * until a free block turns up.
		 */
		for (i = fs->fs_postbl[pos][i]; ; i += fs->fs_rotbl[i]) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			if (fs->fs_rotbl[i] == 0)
				break;
		}
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, fs->fs_frag);
	if (bno == 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	/* mark the block allocated and update all the summary counts */
	clrblock(fs, cgp->cg_free, bno/fs->fs_frag);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
	cgp->cg_btot[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	/*
	 * Allocate an inode in cylinder group "cg", preferring inode
	 * "ipref" if given.  Returns the inode number, or 0 on failure.
	 */
	fs = ip->i_fs;
	/* quick check of the in-core summary before reading the map */
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return 0;
	}
	cgp = bp->b_un.b_cg;
	if (ipref) {
		ipref %= fs->fs_ipg;
		/* use the requested inode if it is free */
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	/* otherwise scan forward from the rotor/preference, wrapping around */
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (0);
gotit:
	/* mark the inode in use and adjust the summary counts */
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	/* directories are counted separately (used by dirpref) */
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
fre(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0)
		panic("free: bad size");
	cg = dtog(fs, bno);
	/* out-of-range block numbers are logged (fserr) and ignored */
	if (badblock(fs, bno))
		return;
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	bno = dtogd(fs, bno);		/* now relative to the cylinder group */
	if (size == fs->fs_bsize) {
		/* freeing a whole block */
		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag))
			panic("free: freeing free block");
		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cgp->cg_b[i][cbtorpos(fs, bno)]++;
		cgp->cg_btot[i]++;
	} else {
		bbase = bno - (bno % fs->fs_frag);	/* first frag of the block */
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - fs->fs_frag)));
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i))
				panic("free: freeing free frag");
			setbit(cgp->cg_free, bno + i);
			cgp->cg_cs.cs_nffree++;
			fs->fs_cstotal.cs_nffree++;
			fs->fs_cs(fs, cg).cs_nffree++;
		}
		/*
		 * add back in counts associated with the new frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - fs->fs_frag)));
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
			cgp->cg_btot[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
		panic("ifree: range");
	cg = itog(fs, ino);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	ino %= fs->fs_ipg;		/* inode number within the group */
	if (isclr(cgp->cg_iused, ino))
		panic("ifree: freeing free inode");
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	/* keep the per-group directory count in step (used by dirpref) */
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	/* first scan from "start" to the end of the map ... */
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1));
	if (loc == 0) {
		/* ... then wrap and scan from the first data block up to "start" */
		loc = fs->fs_dblkno / NBBY;
		len = start - loc + 1;
		start = loc;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	/* scanc apparently counts down, so len - loc is the byte's offset */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += fs->fs_frag) {
		/* NB: "-" binds tighter than ">>", so this is 0xff >> (NBBY - fs_frag) */
		blk = (cgp->cg_free[bno / NBBY] >> i) &
		    (0xff >> NBBY - fs->fs_frag);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		/* slide the pattern window across the block's frags */
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}

/*
 * Update the frsum fields to reflect addition or deletion
 * of some frags.
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/*
	 * For each run of free frags in "fragmap", add "cnt" (which
	 * may be negative) to the fraglist entry for that run size.
	 */
	/* inblk has a bit set for each run size present in fragmap */
	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				/* skip over the run just accounted for */
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

/*
 * Check that a specified block number is in range.
 */
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	/* a valid block lies within the fs and past its group's metadata */
	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(fs, dtog(fs, bn))) {
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * Getfs maps a device number into a pointer to the incore super block.
 *
 * The algorithm is a linear search through the mount table.  A
 * consistency check of the super block magic number is performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL && mp->m_dev == dev) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_magic != FS_MAGIC)
				panic("getfs: bad magic");
			return (fs);
		}
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
905 * 906 * The form of the error message is: 907 * fs: error message 908 */ 909 fserr(fs, cp) 910 struct fs *fs; 911 char *cp; 912 { 913 914 printf("%s: %s\n", fs->fs_fsmnt, cp); 915 } 916 917 /* 918 * Getfsx returns the index in the file system 919 * table of the specified device. The swap device 920 * is also assigned a pseudo-index. The index may 921 * be used as a compressed indication of the location 922 * of a block, recording 923 * <getfsx(dev),blkno> 924 * rather than 925 * <dev, blkno> 926 * provided the information need remain valid only 927 * as long as the file system is mounted. 928 */ 929 getfsx(dev) 930 dev_t dev; 931 { 932 register struct mount *mp; 933 934 if (dev == swapdev) 935 return (MSWAPX); 936 for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++) 937 if (mp->m_dev == dev) 938 return (mp - &mount[0]); 939 return (-1); 940 } 941 942 /* 943 * Update is the internal name of 'sync'. It goes through the disk 944 * queues to initiate sandbagged IO; goes through the inodes to write 945 * modified nodes; and it goes through the mount table to initiate 946 * the writing of the modified super blocks. 947 */ 948 update() 949 { 950 register struct inode *ip; 951 register struct mount *mp; 952 register struct buf *bp; 953 struct fs *fs; 954 time_t tim; 955 int i, blks; 956 957 if (updlock) 958 return; 959 updlock++; 960 /* 961 * Write back modified superblocks. 962 * Consistency check that the superblock 963 * of each file system is still in the buffer cache. 
964 */ 965 for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) 966 if (mp->m_bufp != NULL) { 967 fs = mp->m_bufp->b_un.b_fs; 968 if (fs->fs_fmod == 0) 969 continue; 970 if (fs->fs_ronly != 0) 971 panic("update: rofs mod"); 972 bp = getblk(mp->m_dev, SBLOCK, SBSIZE); 973 fs->fs_fmod = 0; 974 fs->fs_time = TIME; 975 if (bp->b_un.b_fs != fs) 976 panic("update: bad b_fs"); 977 bwrite(bp); 978 blks = howmany(fs->fs_cssize, fs->fs_bsize); 979 for (i = 0; i < blks; i++) { 980 bp = getblk(mp->m_dev, 981 fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)), 982 fs->fs_bsize); 983 bwrite(bp); 984 } 985 } 986 /* 987 * Write back each (modified) inode. 988 */ 989 for (ip = inode; ip < inodeNINODE; ip++) 990 if((ip->i_flag&ILOCK)==0 && ip->i_count) { 991 ip->i_flag |= ILOCK; 992 ip->i_count++; 993 tim = TIME; 994 iupdat(ip, &tim, &tim, 0); 995 iput(ip); 996 } 997 updlock = 0; 998 /* 999 * Force stale buffer cache information to be flushed, 1000 * for all devices. 1001 */ 1002 bflush(NODEV); 1003 } 1004 1005 /* 1006 * block operations 1007 * 1008 * check if a block is available 1009 */ 1010 isblock(fs, cp, h) 1011 struct fs *fs; 1012 unsigned char *cp; 1013 int h; 1014 { 1015 unsigned char mask; 1016 1017 switch (fs->fs_frag) { 1018 case 8: 1019 return (cp[h] == 0xff); 1020 case 4: 1021 mask = 0x0f << ((h & 0x1) << 2); 1022 return ((cp[h >> 1] & mask) == mask); 1023 case 2: 1024 mask = 0x03 << ((h & 0x3) << 1); 1025 return ((cp[h >> 2] & mask) == mask); 1026 case 1: 1027 mask = 0x01 << (h & 0x7); 1028 return ((cp[h >> 3] & mask) == mask); 1029 default: 1030 panic("isblock bad fs_frag"); 1031 return; 1032 } 1033 } 1034 1035 /* 1036 * take a block out of the map 1037 */ 1038 clrblock(fs, cp, h) 1039 struct fs *fs; 1040 unsigned char *cp; 1041 int h; 1042 { 1043 switch ((fs)->fs_frag) { 1044 case 8: 1045 cp[h] = 0; 1046 return; 1047 case 4: 1048 cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); 1049 return; 1050 case 2: 1051 cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); 1052 return; 1053 case 1: 1054 
cp[h >> 3] &= ~(0x01 << (h & 0x7)); 1055 return; 1056 default: 1057 panic("clrblock bad fs_frag"); 1058 return; 1059 } 1060 } 1061 1062 /* 1063 * put a block into the map 1064 */ 1065 setblock(fs, cp, h) 1066 struct fs *fs; 1067 unsigned char *cp; 1068 int h; 1069 { 1070 switch (fs->fs_frag) { 1071 case 8: 1072 cp[h] = 0xff; 1073 return; 1074 case 4: 1075 cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); 1076 return; 1077 case 2: 1078 cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); 1079 return; 1080 case 1: 1081 cp[h >> 3] |= (0x01 << (h & 0x7)); 1082 return; 1083 default: 1084 panic("setblock bad fs_frag"); 1085 return; 1086 } 1087 } 1088