/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)lfs_alloc.c	7.16 (Berkeley) 01/05/90
 */

#include "param.h"
#include "systm.h"
#include "mount.h"
#include "buf.h"
#include "user.h"
#include "vnode.h"
#include "kernel.h"
#include "syslog.h"
#include "cmap.h"
#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/fs.h"

extern u_long hashalloc();
extern ino_t ialloccg();
extern daddr_t alloccg();
extern daddr_t alloccgblk();
extern daddr_t fragextend();
extern daddr_t blkpref();
extern daddr_t mapsearch();
extern int inside[], around[];
extern unsigned char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
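/*
 * Illustrative note (hypothetical geometry, not part of the original
 * comments): on a file system with fs_bsize 8192 and fs_fsize 1024
 * (fs_frag 8), the legal values of `size' are 1024, 2048, ..., 8192;
 * the fragoff() check below rejects anything that is not a multiple
 * of the fragment size.  Callers normally obtain `bpref' from
 * blkpref() so that the new block lands near the rest of the file.
 */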
alloc(ip, lbn, bpref, size, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	daddr_t *bnp;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IUPD|ICHG;
		*bnp = bno;
		return (0);
	}
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
realloccg(ip, lbprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	off_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request;
	daddr_t bprev, bno, bn;
	int i, error, count;

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), 0))
		return (error);
#endif
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	brealloc(bp, nsize);
	bp->b_flags |= B_DONE;
	bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
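	/*
	 * Illustrative note on the optimization switch below, assuming
	 * the common fs_minfree of 10%: while optimizing for space, the
	 * policy flips to time optimization once the free fragment count
	 * cs_nffree falls to fs_dsize * 10 / (2 * 100), i.e. half the
	 * reserve; while optimizing for time, it flips back to space
	 * optimization once cs_nffree climbs to fs_dsize * (10 - 2) / 100,
	 * i.e. within 2% of the reserve.
	 */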
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		bp->b_blkno = bn = fsbtodb(fs, bno);
		count = howmany(osize, CLBYTES);
		for (i = 0; i < count; i++)
			munhash(ip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
		blkfree(ip, bprev, (off_t)osize);
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
				(off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
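/*
 * Note (not in the original comments): when creating a directory,
 * callers typically pass the inode chosen by dirpref() below as
 * `ipref'; for other files the parent directory's inode number is
 * the usual preference, so that a file tends to share a cylinder
 * group with its directory.
 */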
ialloc(pip, ipref, mode, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
#ifdef QUOTA
	if (error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
		return (error);
#endif
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	if (error) {
		ifree(pip, ino, 0);
		return (error);
	}
	ip = *ipp;
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
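/*
 * Note (not in the original comments): dirpref() returns the first
 * inode number in the chosen cylinder group (fs_ipg * mincg), which
 * ialloc() then passes to the cylinder group allocator as its
 * preferred inode.
 */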
/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
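	/*
	 * Illustrative note (hypothetical numbers): starting from
	 * cylinder group 5 on a file system with 32 groups, the rehash
	 * above probes groups 6, 8, 12, 20 and 4 before falling
	 * through to the brute force search below.
	 */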
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}
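/*
 * Illustrative note on fragextend() (hypothetical layout, fs_frag 8):
 * suppose the old piece occupies fragments 0-1 of a block, fragments
 * 2-6 are free, and fragment 7 belongs to another file.  Growing the
 * piece to 5 fragments claims fragments 2-4: the 5-fragment free run
 * disappears (cg_frsum[5]--), a 2-fragment run remains
 * (cg_frsum[2]++), and cs_nffree drops by 3.
 */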
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
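/*
 * Illustrative note on alloccg() (hypothetical request, fs_frag 8):
 * a 3-fragment request when cg_frsum records no free runs of size 3
 * or 4 but one of size 5 settles on allocsiz = 5; mapsearch() locates
 * that run, the first 3 fragments are taken, and the summary is
 * updated with cg_frsum[5]-- and cg_frsum[2]++.
 */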
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
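/*
 * Note (not in the original comments): in the scan below, skpc() is
 * used to skip bytes of the inode map that are entirely allocated
 * (0xff); the value it returns is the number of bytes left
 * unscanned, so start + len - loc indexes the first byte containing
 * a free inode.
 */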
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ialloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ialloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
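/*
 * Illustrative note on blkfree() (hypothetical layout, fs_frag 8):
 * freeing a 3-fragment piece that sits in front of an existing
 * 5-fragment free run completes the block.  fragacct() first removes
 * the old run from the summary (cg_frsum[5]--), the three fragments
 * are returned to cs_nffree, and the full-block check then converts
 * the eight fragments into one free block (cs_nffree -= fs_frag,
 * cs_nbfree++).
 */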
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
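/*
 * Note (not in the original comments): the rotor adjustment in
 * ifree() below keeps cg_irotor at or below any inode freed here,
 * so the byte-wise scan in ialloccg() will not start past it.
 */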
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Check that a specified block number is in range.
 */
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size) {
		printf("bad block %d, ", bn);
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
}