/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_alloc.c	7.20 (Berkeley) 06/28/90
 */

#include "param.h"
#include "systm.h"
#include "buf.h"
#include "user.h"
#include "vnode.h"
#include "kernel.h"
#include "syslog.h"
#include "cmap.h"
#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/fs.h"

extern u_long hashalloc();
extern ino_t ialloccg();
extern daddr_t alloccg();
extern daddr_t alloccgblk();
extern daddr_t fragextend();
extern daddr_t blkpref();
extern daddr_t mapsearch();
extern int inside[], around[];
extern unsigned char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
alloc(ip, lbn, bpref, size, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	daddr_t *bnp;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg, error;
	struct ucred *cred = u.u_cred;	/* XXX */

	*bnp = 0;
	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IUPD|ICHG;
		*bnp = bno;
		return (0);
	}
nospace:
	fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
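/*
 * Illustrative note: with fs_fsize = 1024 and fs_bsize = 8192, the legal
 * values of `size' above are 1024, 2048, ..., 8192; anything else trips
 * the "alloc: bad size" panic.  The figures are examples only; the real
 * values come from the superblock.
 */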
/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
realloccg(ip, lbprev, bpref, osize, nsize, bpp)
	register struct inode *ip;
	off_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp, *obp;
	int cg, request;
	daddr_t bprev, bno, bn;
	int i, error, count;
	struct ucred *cred = u.u_cred;	/* XXX */

	*bpp = 0;
	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0))
		return (error);
#endif
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	brealloc(bp, nsize);
	bp->b_flags |= B_DONE;
	bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
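	/*
	 * Example of the threshold above (illustrative figures): with
	 * fs_minfree = 10% and fs_dsize = 100000 fragments, the switch
	 * to TIME optimization happens once cs_nffree exceeds
	 * 100000 * 10 / 200 = 5000 fragments, i.e. once fragmentation
	 * has consumed half of the free reserve.
	 */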
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying
		 * to grow a small fragment to a larger fragment. To save
		 * time, we allocate a full sized block, then free the
		 * unused portion. If the file continues to grow, the
		 * `fragextend' call above will be able to grow it in place
		 * without further copying. If aberrant programs cause
		 * disk fragmentation to grow within 2% of the free reserve,
		 * we choose to begin optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
	    (u_long (*)())alloccg);
	if (bno > 0) {
		bp->b_blkno = bn = fsbtodb(fs, bno);
		count = howmany(osize, CLBYTES);
		for (i = 0; i < count; i++)
			munhash(ip->i_devvp, bn + i * CLBYTES / DEV_BSIZE);
		blkfree(ip, bprev, (off_t)osize);
		if (nsize < request)
			blkfree(ip, bno + numfrags(fs, nsize),
			    (off_t)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IUPD|ICHG;
		*bpp = bp;
		return (0);
	}
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
ialloc(pip, ipref, mode, cred, ipp)
	register struct inode *pip;
	ino_t ipref;
	int mode;
	struct ucred *cred;
	struct inode **ipp;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg, error;

	*ipp = 0;
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	error = iget(pip, ino, ipp);
	if (error) {
		ifree(pip, ino, mode);
		return (error);
	}
	ip = *ipp;
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	if (ip->i_blocks) {	/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
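	/*
	 * The generation number distinguishes successive lives of the
	 * same inode slot; remote file handles (e.g. NFS) embed i_gen,
	 * so a handle for a since-deleted file will not match a
	 * recycled inode.
	 */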
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to select, from
 * among those cylinder groups with an above-average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
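	/*
	 * Worked example of the rotdelay conversion below (illustrative
	 * figures): with fs_rotdelay = 4 ms, fs_rps = 60, fs_nsect = 32
	 * and NSPF(fs) = 2, the delay is 4 * 60 * 32 / (2 * 1000) = 3
	 * frags, which roundup() then raises to a full block of
	 * fs_frag = 8 frags.
	 */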
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
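/*
 * Illustrative probe order for hashalloc(): starting at cg 5 on a file
 * system with fs_ncg = 16, the quadratic rehash tries groups 6, 8, 12
 * and 4 (20 mod 16); the brute force pass then sweeps 7, 8, ..., 15,
 * 0, 1, ..., 4.  The figures are examples only.
 */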
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended:
	 * deduct the count of the free run being extended into,
	 * increase the count on the remaining free run (if any),
	 * and allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}
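/*
 * Example of the frsum bookkeeping in fragextend() (illustrative): growing
 * a 2-frag fragment to 3 frags when a free run of 4 frags follows it
 * consumes the head of that run, so cg_frsum[4] is decremented and
 * cg_frsum[3] is incremented for the 3 free frags that remain.
 */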
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
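/*
 * Example of the block-carving path in alloccg() (illustrative): a 2-frag
 * request with fs_frag = 8 and no suitable free fragment run takes a whole
 * block, marks frags 2 through 7 of it free again, adds 6 to the nffree
 * counts, and increments cg_frsum[6] for the 6-frag remainder.
 */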
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position; now find the actual
		 * block.  It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cg_blksfree(cgp), (long)fragstoblks(fs, bno));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
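/*
 * Illustrative note on the rotational search in alloccgblk(): with
 * fs_nrpos = 8 and a preferred block at rotational position 5, the
 * per-cylinder summary counts are scanned at positions 5, 6, 7 and then
 * 0 through 4, looking for a position with a free block.
 */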
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ialloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ialloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
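/*
 * Note on the bitmap scan in ialloccg(): skpc() skips map bytes equal to
 * 0xff (all eight inodes in use) and returns the residual length, so
 * start + len - loc indexes the first byte with a free inode; the bit
 * loop then walks that byte to find the free inode itself.
 */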
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg, blk, frags, bbase;
	register int i;
	struct ucred *cred = u.u_cred;	/* XXX */

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((unsigned)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		fserr(fs, cred->cr_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno))) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		setblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cg_blksfree(cgp),
		    (daddr_t)fragstoblks(fs, bbase))) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}
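/*
 * Example of block reassembly in blkfree() (illustrative): with
 * fs_frag = 8, freeing the last 2-frag piece of a block whose other 6
 * frags are already free completes the block, so fs_frag is subtracted
 * from the nffree counts and the nbfree counts gain a full block.
 */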
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg * fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((unsigned)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, uid, cp)
	struct fs *fs;
	uid_t uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}