/*	lfs_alloc.c	2.20	82/12/17	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mount.h"
#include "../h/fs.h"
#include "../h/conf.h"
#include "../h/buf.h"
#include "../h/inode.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/quota.h"
#include "../h/kernel.h"

extern u_long		hashalloc();
extern ino_t		ialloccg();
extern daddr_t		alloccg();
extern daddr_t		alloccgblk();
extern daddr_t		fragextend();
extern daddr_t		blkpref();
extern daddr_t		mapsearch();
extern int		inside[], around[];
extern unsigned char	*fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
struct buf *
alloc(ip, bpref, size)
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
#ifdef QUOTA
	if (chkdq(ip, (long)((unsigned)size/DEV_BSIZE), 0))
		return(NULL);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
		(u_long (*)())alloccg);
	if (bno <= 0)
		goto nospace;
	bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

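/*
 * For illustration only (the numbers below are assumed, not taken
 * from any particular file system): with fs_minfree set to 10 and
 * fs_dsize equal to 80000 fragments, the reserve threshold computed
 * in alloc() above is 80000 * 10 / 100 = 8000 fragments.  Once the
 * total free space, fs_cstotal.cs_nbfree * fs_frag +
 * fs_cstotal.cs_nffree, drops below 8000 fragments, allocations by
 * users other than the super-user fail with ENOSPC even though some
 * space remains.
 */
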
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
struct buf *
realloccg(ip, bprev, bpref, osize, nsize)
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (chkdq(ip, (long)((unsigned)(nsize-osize)/DEV_BSIZE), 0))
		return(NULL);
#endif
	cg = dtog(fs, bprev);
	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		do {
			bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
			if (bp->b_flags & B_ERROR) {
				brelse(bp);
				return (NULL);
			}
		} while (brealloc(bp, nsize) == 0);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize,
		(u_long (*)())alloccg);
	if (bno > 0) {
		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
		if (obp->b_flags & B_ERROR) {
			brelse(obp);
			return (NULL);
		}
		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		brelse(obp);
		free(ip, bprev, (off_t)osize);
		return (bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

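/*
 * For illustration only (the sizes below are assumed): on a file
 * system with fs_fsize = 1024 and fs_bsize = 8192, growing a file's
 * last piece from osize = 2048 (two fragments) to nsize = 4096 (four
 * fragments) first calls fragextend() to try to claim the two free
 * fragments immediately following bprev within the same block.  If
 * either of those fragments is busy, a fresh four-fragment piece is
 * obtained through hashalloc()/alloccg(), the old contents are
 * copied over with bcopy(), and the original two fragments are
 * released with free().
 */
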
/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 *   1) allocate the requested inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
struct inode *
ialloc(pip, ipref, mode)
	register struct inode *pip;
	ino_t ipref;
	int mode;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
#ifdef QUOTA
	if (chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
		return(NULL);
#endif
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(pip->i_dev, pip->i_fs, ino);
	if (ip == NULL) {
		ifree(ip, ino, 0);
		return (NULL);
	}
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by maintaining a rotor that sweeps the cylinder groups. When a new
 * group of blocks is needed, the rotor is advanced until a cylinder group
 * with greater than the average number of free blocks is found.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	int cg, avgbfree;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= fs->fs_cgrotor; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + fs->fs_frag * fs->fs_maxcontig
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		  ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

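/*
 * Worked example of the rotational delay computation in blkpref(),
 * using assumed drive parameters (not taken from any particular
 * disk): with fs_rotdelay = 4 ms, fs_rps = 60 revolutions/sec,
 * fs_nsect = 32 sectors/track, NSPF(fs) = 2 sectors/frag and
 * fs_frag = 8, the integer arithmetic gives
 * 4 * 60 * 32 / (2 * 1000) = 3 frags, which roundup() raises to the
 * next multiple of fs_frag, i.e. 8 frags.  The preferred block is
 * therefore placed one full block beyond the strictly contiguous
 * position.
 */
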
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

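/*
 * For illustration only (fs_ncg = 16 and a starting group of 5 are
 * assumed): after the preferred group 5 is tried, the quadratic
 * rehash above probes groups 6, 8, 12 and 4 (the offsets 1, 2, 4, 8
 * accumulate, wrapping modulo fs_ncg); if all of those fail, the
 * brute force pass tries every group in order starting again from
 * group 5.
 */
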
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < nsize - osize)
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragoff(fs, bprev);
	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cgp->cg_free, bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cgp->cg_free, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cgp->cg_free, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	register int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cgp->cg_free, bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		return (NULL);
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

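/*
 * For illustration only (fs_bsize = 8192 and fs_fsize = 1024, giving
 * fs_frag = 8, are assumed): a request for size = 3072 needs three
 * fragments, so cg_frsum[] is scanned from index 3 upward for a free
 * run at least that long.  If the smallest run found is 4 fragments,
 * mapsearch() locates it, the first 3 bits are cleared, cg_frsum[4]
 * is decremented and cg_frsum[1] incremented for the 1-frag
 * remainder.  If no run of 3 or more exists, a whole block is taken
 * instead, the last 5 of its 8 fragments are put back in the free
 * map, and cg_frsum[5] is incremented.
 */
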
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref &= ~(fs->fs_frag - 1);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cgp->cg_btot[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cgp->cg_b[cylno];
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs->fs_postbl[pos][i];; ) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			delta = fs->fs_rotbl[i];
			if (delta <= 0 || delta > MAXBPC - i)
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cgp->cg_free, (long)(bno/fs->fs_frag));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
	cgp->cg_btot[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

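/*
 * A note on the rotational search above, with NRPOS assumed to be 8
 * (its usual value): cg_b[cylno][] counts the free blocks at each of
 * the 8 rotational positions in the cylinder, so the scan starts at
 * the position of the preferred block and wraps around until a
 * nonzero count is found.  fs_postbl[cylno % fs_cpc][i] then gives
 * the first block offset in that cylinder at rotational position i,
 * and fs_rotbl[] supplies the skip to the next block at the same
 * position, so only candidate blocks at the chosen position are
 * examined in the free map.
 */
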
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (NULL);
gotit:
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
free(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("free: bad size");
	}
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("free: freeing free block");
		}
		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cgp->cg_b[i][cbtorpos(fs, bno)]++;
		cgp->cg_btot[i]++;
	} else {
		bbase = bno - (bno % fs->fs_frag);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("free: freeing free frag");
			}
			setbit(cgp->cg_free, bno + i);
		}
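		/*
		 * Illustration (values assumed, with fs_frag == 8): if the
		 * two fragments just marked free were the last busy pieces
		 * of their block and the other six were already free, all
		 * eight bits for the block are now set; after the fragment
		 * counts are credited below, the reassembly check notices
		 * the complete block and trades eight fragments of nffree
		 * for a single nbfree.
		 */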
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
			cgp->cg_btot[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_bsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
		if (loc == 0)
			return (-1);
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cgp->cg_free, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}