/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_alloc.c	7.22 (Berkeley) 09/01/90
 */

#include "param.h"
#include "systm.h"
#include "buf.h"
#include "user.h"
#include "vnode.h"
#include "kernel.h"
#include "syslog.h"
#include "cmap.h"
#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/fs.h"

extern u_long hashalloc();
extern ino_t ialloccg();
extern daddr_t alloccg();
extern daddr_t alloccgblk();
extern daddr_t fragextend();
extern daddr_t blkpref();
extern daddr_t mapsearch();
extern int inside[], around[];
extern unsigned char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
alloc(ip, lbn, bpref, size, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	daddr_t *bnp;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;
	struct ucred *cred = u.u_cred;	/* XXX */

	*bnp = 0;
	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IUPD|ICHG;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

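/*
 * For illustration only (this sketch is not part of the original file):
 * a caller that needs a fresh data block typically derives a preference
 * from blkpref() and records the result in the inode's block map,
 * roughly as follows.  The real call sites live in the block-mapping
 * code outside this file.
 *
 *	daddr_t newb;
 *
 *	error = alloc(ip, lbn, blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
 *	    fs->fs_bsize, &newb);
 *	if (error == 0)
 *		ip->i_db[lbn] = newb;
 */
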
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
realloccg(ip, lbprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	off_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request;
	daddr_t bprev, bno, bn;
	int i, error, count;
	struct ucred *cred = u.u_cred;	/* XXX */

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	allocbuf(bp, nsize);
	bp->b_flags |= B_DONE;
	bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
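		/*
		 * For illustration only (not part of the original file):
		 * with an assumed fs_minfree of 10% and fs_dsize of
		 * 100000 frags, the SPACE case above switches to TIME
		 * optimization once cs_nffree falls to
		 * 100000 * 10 / 200 = 5000 frags or less, while this
		 * TIME case switches back to SPACE once cs_nffree
		 * reaches 100000 * (10 - 2) / 100 = 8000 frags, giving
		 * the two policies some hysteresis.
		 */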
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		bp->b_blkno = bn = fsbtodb(fs, bno);
		count = howmany(osize, CLBYTES);
		for (i = 0; i < count; i++)
			munhash(ip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
		blkfree(ip, bprev, (off_t)osize);
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
			    (off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
ialloc(pip, ipref, mode, cred, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct ucred *cred;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	if (error) {
		ifree(pip, ino, mode);
		return (error);
	}
	ip = *ipp;
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {	/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

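/*
 * For illustration only (not part of the original file): a typical
 * create path chooses an inode preference from the parent directory
 * and then calls ialloc() above, roughly as follows.  dirpref(),
 * defined next, picks the cylinder group for a new directory.
 *
 *	if ((mode & IFMT) == IFDIR)
 *		ipref = dirpref(pip->i_fs);
 *	else
 *		ipref = pip->i_number;
 *	error = ialloc(pip, ipref, mode, cred, &ip);
 */
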
/*
 * Find a cylinder group to place a directory in.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
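	/*
	 * For illustration only (not part of the original file): for the
	 * rotational delay adjustment below, assume fs_rotdelay = 4 ms,
	 * fs_rps = 60 revolutions per second, fs_nsect = 32 sectors per
	 * track, NSPF(fs) = 2 sectors per frag and fs_frag = 8.  Then
	 * 4 * 60 * 32 / (2 * 1000) = 3 frags of delay (integer division),
	 * which roundup() raises to 8 frags, so the preferred block lands
	 * one full block beyond the strictly contiguous position.
	 */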
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		  ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

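/*
 * For illustration only (not part of the original file): with
 * fs_ncg = 32 and a preferred cylinder group of 5, hashalloc() first
 * tries group 5, the quadratic rehash then probes groups 6, 8, 12, 20
 * and 4 (wrapping past the last group), and the brute force sweep
 * finally starts at group (5 + 2) % 32 = 7 and walks the remaining
 * groups in order.
 */
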
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}

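/*
 * For illustration only (not part of the original file): suppose
 * fragextend() above grows a 2-frag fragment to 3 frags and the 4
 * frags following it in the same block are free.  The adjacent free
 * run shrinks from 4 frags to 3, so cg_frsum[4] is decremented and
 * cg_frsum[3] incremented, one bit is cleared in cg_blksfree, and
 * each of the three nffree counts drops by 1.
 */
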
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block; it is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ialloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ialloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

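/*
 * For illustration only (not part of the original file): once skpc()
 * in ialloccg() above has found a map byte that is not 0xff, the bit
 * loop scans it from the low-order end.  If, say, byte 6 of
 * cg_inosused is 0x5f (binary 01011111), bits 0 through 4 are in use,
 * bit 5 is the first clear bit, and the inode selected is
 * 6 * NBBY + 5 = 53 within the cylinder group.
 */
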
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;
	struct ucred *cred = u.u_cred;	/* XXX */

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((unsigned)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		fserr(fs, cred->cr_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

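/*
 * For illustration only (not part of the original file; fs_frag = 8
 * is assumed): suppose blkfree() above releases a 2-frag fragment
 * occupying the last two frags of a block whose first six frags are
 * already free.  fragacct(..., -1) removes the old run of 6 from
 * cg_frsum, the two bits are set and the nffree counts grow by 2,
 * fragacct(..., 1) finds no partial runs in the now fully free block,
 * and the reassembly check converts the 8 free frags into one free
 * block: nffree drops by fs_frag and nbfree is incremented.
 */
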
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg * fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, uid, cp)
	struct fs *fs;
	uid_t uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}