1 /*- 2 * modified for Lites 1.1 3 * 4 * Aug 1995, Godmar Back (gback@cs.utah.edu) 5 * University of Utah, Department of Computer Science 6 */ 7 /*- 8 * Copyright (c) 1982, 1986, 1989, 1993 9 * The Regents of the University of California. All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94 36 * $FreeBSD$ 37 */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/conf.h> 42 #include <sys/vnode.h> 43 #include <sys/stat.h> 44 #include <sys/mount.h> 45 #include <sys/sysctl.h> 46 #include <sys/syslog.h> 47 #include <sys/buf.h> 48 #include <sys/endian.h> 49 50 #include <fs/ext2fs/fs.h> 51 #include <fs/ext2fs/inode.h> 52 #include <fs/ext2fs/ext2_mount.h> 53 #include <fs/ext2fs/ext2fs.h> 54 #include <fs/ext2fs/ext2_extern.h> 55 56 static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int); 57 static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int); 58 static u_long ext2_dirpref(struct inode *); 59 static void ext2_fserr(struct m_ext2fs *, uid_t, char *); 60 static u_long ext2_hashalloc(struct inode *, int, long, int, 61 daddr_t (*)(struct inode *, int, daddr_t, 62 int)); 63 static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int); 64 static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t); 65 66 /* 67 * Allocate a block in the filesystem. 68 * 69 * A preference may be optionally specified. If a preference is given 70 * the following hierarchy is used to allocate a block: 71 * 1) allocate the requested block. 72 * 2) allocate a rotationally optimal block in the same cylinder. 73 * 3) allocate a block in the same cylinder group. 74 * 4) quadradically rehash into other cylinder groups, until an 75 * available block is located. 76 * If no block preference is given the following hierarchy is used 77 * to allocate a block: 78 * 1) allocate a block in the cylinder group that contains the 79 * inode for the file. 80 * 2) quadradically rehash into other cylinder groups, until an 81 * available block is located. 
82 */ 83 int 84 ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size, 85 struct ucred *cred, e4fs_daddr_t *bnp) 86 { 87 struct m_ext2fs *fs; 88 struct ext2mount *ump; 89 int32_t bno; 90 int cg; 91 92 *bnp = 0; 93 fs = ip->i_e2fs; 94 ump = ip->i_ump; 95 mtx_assert(EXT2_MTX(ump), MA_OWNED); 96 #ifdef INVARIANTS 97 if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) { 98 vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n", 99 (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt); 100 panic("ext2_alloc: bad size"); 101 } 102 if (cred == NOCRED) 103 panic("ext2_alloc: missing credential"); 104 #endif /* INVARIANTS */ 105 if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0) 106 goto nospace; 107 if (cred->cr_uid != 0 && 108 fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount) 109 goto nospace; 110 if (bpref >= fs->e2fs->e2fs_bcount) 111 bpref = 0; 112 if (bpref == 0) 113 cg = ino_to_cg(fs, ip->i_number); 114 else 115 cg = dtog(fs, bpref); 116 bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize, 117 ext2_alloccg); 118 if (bno > 0) { 119 /* set next_alloc fields as done in block_getblk */ 120 ip->i_next_alloc_block = lbn; 121 ip->i_next_alloc_goal = bno; 122 123 ip->i_blocks += btodb(fs->e2fs_bsize); 124 ip->i_flag |= IN_CHANGE | IN_UPDATE; 125 *bnp = bno; 126 return (0); 127 } 128 nospace: 129 EXT2_UNLOCK(ump); 130 ext2_fserr(fs, cred->cr_uid, "filesystem full"); 131 uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt); 132 return (ENOSPC); 133 } 134 135 /* 136 * Allocate EA's block for inode. 137 */ 138 daddr_t 139 ext2_allocfacl(struct inode *ip) 140 { 141 struct m_ext2fs *fs; 142 daddr_t facl; 143 144 fs = ip->i_e2fs; 145 146 EXT2_LOCK(ip->i_ump); 147 facl = ext2_alloccg(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize); 148 if (0 == facl) 149 EXT2_UNLOCK(ip->i_ump); 150 151 return (facl); 152 } 153 154 /* 155 * Reallocate a sequence of blocks into a contiguous sequence of blocks. 
156 * 157 * The vnode and an array of buffer pointers for a range of sequential 158 * logical blocks to be made contiguous is given. The allocator attempts 159 * to find a range of sequential blocks starting as close as possible to 160 * an fs_rotdelay offset from the end of the allocation for the logical 161 * block immediately preceding the current range. If successful, the 162 * physical block numbers in the buffer pointers and in the inode are 163 * changed to reflect the new allocation. If unsuccessful, the allocation 164 * is left unchanged. The success in doing the reallocation is returned. 165 * Note that the error return is not reflected back to the user. Rather 166 * the previous block allocation will be used. 167 */ 168 169 static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem"); 170 171 static int doasyncfree = 1; 172 173 SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, 174 "Use asychronous writes to update block pointers when freeing blocks"); 175 176 static int doreallocblks = 1; 177 178 SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); 179 180 int 181 ext2_reallocblks(struct vop_reallocblks_args *ap) 182 { 183 struct m_ext2fs *fs; 184 struct inode *ip; 185 struct vnode *vp; 186 struct buf *sbp, *ebp; 187 uint32_t *bap, *sbap, *ebap; 188 struct ext2mount *ump; 189 struct cluster_save *buflist; 190 struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp; 191 e2fs_lbn_t start_lbn, end_lbn; 192 int soff; 193 e2fs_daddr_t newblk, blkno; 194 int i, len, start_lvl, end_lvl, pref, ssize; 195 196 if (doreallocblks == 0) 197 return (ENOSPC); 198 199 vp = ap->a_vp; 200 ip = VTOI(vp); 201 fs = ip->i_e2fs; 202 ump = ip->i_ump; 203 204 if (fs->e2fs_contigsumsize <= 0) 205 return (ENOSPC); 206 207 buflist = ap->a_buflist; 208 len = buflist->bs_nchildren; 209 start_lbn = buflist->bs_children[0]->b_lblkno; 210 end_lbn = start_lbn + len - 1; 211 #ifdef INVARIANTS 212 for 
(i = 1; i < len; i++) 213 if (buflist->bs_children[i]->b_lblkno != start_lbn + i) 214 panic("ext2_reallocblks: non-cluster"); 215 #endif 216 /* 217 * If the cluster crosses the boundary for the first indirect 218 * block, leave space for the indirect block. Indirect blocks 219 * are initially laid out in a position after the last direct 220 * block. Block reallocation would usually destroy locality by 221 * moving the indirect block out of the way to make room for 222 * data blocks if we didn't compensate here. We should also do 223 * this for other indirect block boundaries, but it is only 224 * important for the first one. 225 */ 226 if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR) 227 return (ENOSPC); 228 /* 229 * If the latest allocation is in a new cylinder group, assume that 230 * the filesystem has decided to move and do not force it back to 231 * the previous cylinder group. 232 */ 233 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != 234 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) 235 return (ENOSPC); 236 if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) || 237 ext2_getlbns(vp, end_lbn, end_ap, &end_lvl)) 238 return (ENOSPC); 239 /* 240 * Get the starting offset and block map for the first block. 241 */ 242 if (start_lvl == 0) { 243 sbap = &ip->i_db[0]; 244 soff = start_lbn; 245 } else { 246 idp = &start_ap[start_lvl - 1]; 247 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) { 248 brelse(sbp); 249 return (ENOSPC); 250 } 251 sbap = (u_int *)sbp->b_data; 252 soff = idp->in_off; 253 } 254 /* 255 * If the block range spans two block maps, get the second map. 
256 */ 257 ebap = NULL; 258 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 259 ssize = len; 260 } else { 261 #ifdef INVARIANTS 262 if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn) 263 panic("ext2_reallocblks: start == end"); 264 #endif 265 ssize = len - (idp->in_off + 1); 266 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp)) 267 goto fail; 268 ebap = (u_int *)ebp->b_data; 269 } 270 /* 271 * Find the preferred location for the cluster. 272 */ 273 EXT2_LOCK(ump); 274 pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0); 275 /* 276 * Search the block map looking for an allocation of the desired size. 277 */ 278 if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref, 279 len, ext2_clusteralloc)) == 0) { 280 EXT2_UNLOCK(ump); 281 goto fail; 282 } 283 /* 284 * We have found a new contiguous block. 285 * 286 * First we have to replace the old block pointers with the new 287 * block pointers in the inode and indirect blocks associated 288 * with the file. 289 */ 290 #ifdef DEBUG 291 printf("realloc: ino %ju, lbns %jd-%jd\n\told:", 292 (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn); 293 #endif /* DEBUG */ 294 blkno = newblk; 295 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) { 296 if (i == ssize) { 297 bap = ebap; 298 soff = -i; 299 } 300 #ifdef INVARIANTS 301 if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap)) 302 panic("ext2_reallocblks: alloc mismatch"); 303 #endif 304 #ifdef DEBUG 305 printf(" %d,", *bap); 306 #endif /* DEBUG */ 307 *bap++ = blkno; 308 } 309 /* 310 * Next we must write out the modified inode and indirect blocks. 311 * For strict correctness, the writes should be synchronous since 312 * the old block values may have been written to disk. In practise 313 * they are almost never written, but if we are concerned about 314 * strict correctness, the `doasyncfree' flag should be set to zero. 
315 * 316 * The test on `doasyncfree' should be changed to test a flag 317 * that shows whether the associated buffers and inodes have 318 * been written. The flag should be set when the cluster is 319 * started and cleared whenever the buffer or inode is flushed. 320 * We can then check below to see if it is set, and do the 321 * synchronous write only when it has been cleared. 322 */ 323 if (sbap != &ip->i_db[0]) { 324 if (doasyncfree) 325 bdwrite(sbp); 326 else 327 bwrite(sbp); 328 } else { 329 ip->i_flag |= IN_CHANGE | IN_UPDATE; 330 if (!doasyncfree) 331 ext2_update(vp, 1); 332 } 333 if (ssize < len) { 334 if (doasyncfree) 335 bdwrite(ebp); 336 else 337 bwrite(ebp); 338 } 339 /* 340 * Last, free the old blocks and assign the new blocks to the buffers. 341 */ 342 #ifdef DEBUG 343 printf("\n\tnew:"); 344 #endif /* DEBUG */ 345 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) { 346 ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno), 347 fs->e2fs_bsize); 348 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); 349 #ifdef DEBUG 350 printf(" %d,", blkno); 351 #endif /* DEBUG */ 352 } 353 #ifdef DEBUG 354 printf("\n"); 355 #endif /* DEBUG */ 356 return (0); 357 358 fail: 359 if (ssize < len) 360 brelse(ebp); 361 if (sbap != &ip->i_db[0]) 362 brelse(sbp); 363 return (ENOSPC); 364 } 365 366 /* 367 * Allocate an inode in the filesystem. 368 * 369 */ 370 int 371 ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) 372 { 373 struct timespec ts; 374 struct inode *pip; 375 struct m_ext2fs *fs; 376 struct inode *ip; 377 struct ext2mount *ump; 378 ino_t ino, ipref; 379 int i, error, cg; 380 381 *vpp = NULL; 382 pip = VTOI(pvp); 383 fs = pip->i_e2fs; 384 ump = pip->i_ump; 385 386 EXT2_LOCK(ump); 387 if (fs->e2fs->e2fs_ficount == 0) 388 goto noinodes; 389 /* 390 * If it is a directory then obtain a cylinder group based on 391 * ext2_dirpref else obtain it using ino_to_cg. 
The preferred inode is 392 * always the next inode. 393 */ 394 if ((mode & IFMT) == IFDIR) { 395 cg = ext2_dirpref(pip); 396 if (fs->e2fs_contigdirs[cg] < 255) 397 fs->e2fs_contigdirs[cg]++; 398 } else { 399 cg = ino_to_cg(fs, pip->i_number); 400 if (fs->e2fs_contigdirs[cg] > 0) 401 fs->e2fs_contigdirs[cg]--; 402 } 403 ipref = cg * fs->e2fs->e2fs_ipg + 1; 404 ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg); 405 406 if (ino == 0) 407 goto noinodes; 408 error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp); 409 if (error) { 410 ext2_vfree(pvp, ino, mode); 411 return (error); 412 } 413 ip = VTOI(*vpp); 414 415 /* 416 * The question is whether using VGET was such good idea at all: 417 * Linux doesn't read the old inode in when it is allocating a 418 * new one. I will set at least i_size and i_blocks to zero. 419 */ 420 ip->i_flag = 0; 421 ip->i_size = 0; 422 ip->i_blocks = 0; 423 ip->i_mode = 0; 424 ip->i_flags = 0; 425 /* now we want to make sure that the block pointers are zeroed out */ 426 for (i = 0; i < EXT2_NDADDR; i++) 427 ip->i_db[i] = 0; 428 for (i = 0; i < EXT2_NIADDR; i++) 429 ip->i_ib[i] = 0; 430 431 /* 432 * Set up a new generation number for this inode. 433 * Avoid zero values. 434 */ 435 do { 436 ip->i_gen = arc4random(); 437 } while (ip->i_gen == 0); 438 439 vfs_timestamp(&ts); 440 ip->i_birthtime = ts.tv_sec; 441 ip->i_birthnsec = ts.tv_nsec; 442 443 /* 444 printf("ext2_valloc: allocated inode %d\n", ino); 445 */ 446 return (0); 447 noinodes: 448 EXT2_UNLOCK(ump); 449 ext2_fserr(fs, cred->cr_uid, "out of inodes"); 450 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt); 451 return (ENOSPC); 452 } 453 454 /* 455 * Find a cylinder to place a directory. 456 * 457 * The policy implemented by this algorithm is to allocate a 458 * directory inode in the same cylinder group as its parent 459 * directory, but also to reserve space for its files inodes 460 * and data. 
Restrict the number of directories which may be 461 * allocated one after another in the same cylinder group 462 * without intervening allocation of files. 463 * 464 * If we allocate a first level directory then force allocation 465 * in another cylinder group. 466 * 467 */ 468 static u_long 469 ext2_dirpref(struct inode *pip) 470 { 471 struct m_ext2fs *fs; 472 int cg, prefcg, cgsize; 473 u_int avgifree, avgbfree, avgndir, curdirsize; 474 u_int minifree, minbfree, maxndir; 475 u_int mincg, minndir; 476 u_int dirsize, maxcontigdirs; 477 478 mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED); 479 fs = pip->i_e2fs; 480 481 avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount; 482 avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount; 483 avgndir = fs->e2fs_total_dir / fs->e2fs_gcount; 484 485 /* 486 * Force allocation in another cg if creating a first level dir. 487 */ 488 ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref"); 489 if (ITOV(pip)->v_vflag & VV_ROOT) { 490 prefcg = arc4random() % fs->e2fs_gcount; 491 mincg = prefcg; 492 minndir = fs->e2fs_ipg; 493 for (cg = prefcg; cg < fs->e2fs_gcount; cg++) 494 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && 495 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && 496 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { 497 mincg = cg; 498 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; 499 } 500 for (cg = 0; cg < prefcg; cg++) 501 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && 502 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && 503 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { 504 mincg = cg; 505 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; 506 } 507 return (mincg); 508 } 509 /* 510 * Count various limits which used for 511 * optimal allocation of a directory inode. 
512 */ 513 maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg); 514 minifree = avgifree - avgifree / 4; 515 if (minifree < 1) 516 minifree = 1; 517 minbfree = avgbfree - avgbfree / 4; 518 if (minbfree < 1) 519 minbfree = 1; 520 cgsize = fs->e2fs_fsize * fs->e2fs_fpg; 521 dirsize = AVGDIRSIZE; 522 curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0; 523 if (dirsize < curdirsize) 524 dirsize = curdirsize; 525 maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255); 526 maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR); 527 if (maxcontigdirs == 0) 528 maxcontigdirs = 1; 529 530 /* 531 * Limit number of dirs in one cg and reserve space for 532 * regular files, but only if we have no deficit in 533 * inodes or space. 534 */ 535 prefcg = ino_to_cg(fs, pip->i_number); 536 for (cg = prefcg; cg < fs->e2fs_gcount; cg++) 537 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && 538 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && 539 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { 540 if (fs->e2fs_contigdirs[cg] < maxcontigdirs) 541 return (cg); 542 } 543 for (cg = 0; cg < prefcg; cg++) 544 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && 545 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && 546 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { 547 if (fs->e2fs_contigdirs[cg] < maxcontigdirs) 548 return (cg); 549 } 550 /* 551 * This is a backstop when we have deficit in space. 552 */ 553 for (cg = prefcg; cg < fs->e2fs_gcount; cg++) 554 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) 555 return (cg); 556 for (cg = 0; cg < prefcg; cg++) 557 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) 558 break; 559 return (cg); 560 } 561 562 /* 563 * Select the desired position for the next block in a file. 
564 * 565 * we try to mimic what Remy does in inode_getblk/block_getblk 566 * 567 * we note: blocknr == 0 means that we're about to allocate either 568 * a direct block or a pointer block at the first level of indirection 569 * (In other words, stuff that will go in i_db[] or i_ib[]) 570 * 571 * blocknr != 0 means that we're allocating a block that is none 572 * of the above. Then, blocknr tells us the number of the block 573 * that will hold the pointer 574 */ 575 e4fs_daddr_t 576 ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap, 577 e2fs_daddr_t blocknr) 578 { 579 int tmp; 580 581 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); 582 583 /* 584 * If the next block is actually what we thought it is, then set the 585 * goal to what we thought it should be. 586 */ 587 if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0) 588 return ip->i_next_alloc_goal; 589 590 /* 591 * Now check whether we were provided with an array that basically 592 * tells us previous blocks to which we want to stay close. 593 */ 594 if (bap) 595 for (tmp = indx - 1; tmp >= 0; tmp--) 596 if (bap[tmp]) 597 return bap[tmp]; 598 599 /* 600 * Else lets fall back to the blocknr or, if there is none, follow 601 * the rule that a block should be allocated near its inode. 602 */ 603 return blocknr ? blocknr : 604 (e2fs_daddr_t)(ip->i_block_group * 605 EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) + 606 ip->i_e2fs->e2fs->e2fs_first_dblock; 607 } 608 609 /* 610 * Implement the cylinder overflow algorithm. 611 * 612 * The policy implemented by this algorithm is: 613 * 1) allocate the block in its requested cylinder group. 614 * 2) quadradically rehash on the cylinder group number. 615 * 3) brute force search for a free block. 
616 */ 617 static u_long 618 ext2_hashalloc(struct inode *ip, int cg, long pref, int size, 619 daddr_t (*allocator) (struct inode *, int, daddr_t, int)) 620 { 621 struct m_ext2fs *fs; 622 ino_t result; 623 int i, icg = cg; 624 625 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); 626 fs = ip->i_e2fs; 627 /* 628 * 1: preferred cylinder group 629 */ 630 result = (*allocator)(ip, cg, pref, size); 631 if (result) 632 return (result); 633 /* 634 * 2: quadratic rehash 635 */ 636 for (i = 1; i < fs->e2fs_gcount; i *= 2) { 637 cg += i; 638 if (cg >= fs->e2fs_gcount) 639 cg -= fs->e2fs_gcount; 640 result = (*allocator)(ip, cg, 0, size); 641 if (result) 642 return (result); 643 } 644 /* 645 * 3: brute force search 646 * Note that we start at i == 2, since 0 was checked initially, 647 * and 1 is always checked in the quadratic rehash. 648 */ 649 cg = (icg + 2) % fs->e2fs_gcount; 650 for (i = 2; i < fs->e2fs_gcount; i++) { 651 result = (*allocator)(ip, cg, 0, size); 652 if (result) 653 return (result); 654 cg++; 655 if (cg == fs->e2fs_gcount) 656 cg = 0; 657 } 658 return (0); 659 } 660 661 static unsigned long 662 ext2_cg_num_gdb(struct m_ext2fs *fs, int cg) 663 { 664 int gd_per_block, metagroup, first, last; 665 666 gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); 667 metagroup = cg / gd_per_block; 668 first = metagroup * gd_per_block; 669 last = first + gd_per_block - 1; 670 671 if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || 672 metagroup < fs->e2fs->e3fs_first_meta_bg) { 673 if (!ext2_cg_has_sb(fs, cg)) 674 return (0); 675 if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG)) 676 return (fs->e2fs->e3fs_first_meta_bg); 677 return (fs->e2fs_gdbcount); 678 } 679 680 if (cg == first || cg == first + 1 || cg == last) 681 return (1); 682 return (0); 683 684 } 685 686 static int 687 ext2_num_base_meta_blocks(struct m_ext2fs *fs, int cg) 688 { 689 int num, gd_per_block; 690 691 gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); 692 num = ext2_cg_has_sb(fs, 
cg); 693 694 if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || 695 cg < fs->e2fs->e3fs_first_meta_bg * gd_per_block) { 696 if (num) { 697 num += ext2_cg_num_gdb(fs, cg); 698 num += fs->e2fs->e2fs_reserved_ngdb; 699 } 700 } else { 701 num += ext2_cg_num_gdb(fs, cg); 702 } 703 704 return (num); 705 } 706 707 static int 708 ext2_get_cg_number(struct m_ext2fs *fs, daddr_t blk) 709 { 710 int cg; 711 712 if (fs->e2fs->e2fs_bpg == fs->e2fs_bsize * 8) 713 cg = (blk - fs->e2fs->e2fs_first_dblock) / (fs->e2fs_bsize * 8); 714 else 715 cg = blk - fs->e2fs->e2fs_first_dblock; 716 717 return (cg); 718 } 719 720 static void 721 ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) 722 { 723 int i; 724 725 if (start_bit >= end_bit) 726 return; 727 728 for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) 729 setbit(bitmap, i); 730 if (i < end_bit) 731 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); 732 } 733 734 static int 735 ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp) 736 { 737 int bit, bit_max, inodes_per_block; 738 uint32_t start, tmp; 739 740 if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || 741 !(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT)) 742 return (0); 743 744 memset(bp->b_data, 0, fs->e2fs_bsize); 745 746 bit_max = ext2_num_base_meta_blocks(fs, cg); 747 if ((bit_max >> 3) >= fs->e2fs_bsize) 748 return (EINVAL); 749 750 for (bit = 0; bit < bit_max; bit++) 751 setbit(bp->b_data, bit); 752 753 start = cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock; 754 755 /* Set bits for block and inode bitmaps, and inode table */ 756 tmp = fs->e2fs_gd[cg].ext2bgd_b_bitmap; 757 if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || 758 tmp == ext2_get_cg_number(fs, cg)) 759 setbit(bp->b_data, tmp - start); 760 761 tmp = fs->e2fs_gd[cg].ext2bgd_i_bitmap; 762 if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || 763 tmp == ext2_get_cg_number(fs, cg)) 764 setbit(bp->b_data, tmp - start); 765 
766 tmp = fs->e2fs_gd[cg].ext2bgd_i_tables; 767 inodes_per_block = fs->e2fs_bsize/EXT2_INODE_SIZE(fs); 768 while( tmp < fs->e2fs_gd[cg].ext2bgd_i_tables + 769 fs->e2fs->e2fs_ipg / inodes_per_block ) { 770 if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || 771 tmp == ext2_get_cg_number(fs, cg)) 772 setbit(bp->b_data, tmp - start); 773 tmp++; 774 } 775 776 /* 777 * Also if the number of blocks within the group is less than 778 * the blocksize * 8 ( which is the size of bitmap ), set rest 779 * of the block bitmap to 1 780 */ 781 ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8, 782 bp->b_data); 783 784 /* Clean the flag */ 785 fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT; 786 787 return (0); 788 } 789 790 /* 791 * Determine whether a block can be allocated. 792 * 793 * Check to see if a block of the appropriate size is available, 794 * and if it is, allocate it. 795 */ 796 static daddr_t 797 ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size) 798 { 799 struct m_ext2fs *fs; 800 struct buf *bp; 801 struct ext2mount *ump; 802 daddr_t bno, runstart, runlen; 803 int bit, loc, end, error, start; 804 char *bbp; 805 /* XXX ondisk32 */ 806 fs = ip->i_e2fs; 807 ump = ip->i_ump; 808 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) 809 return (0); 810 EXT2_UNLOCK(ump); 811 error = bread(ip->i_devvp, fsbtodb(fs, 812 fs->e2fs_gd[cg].ext2bgd_b_bitmap), 813 (int)fs->e2fs_bsize, NOCRED, &bp); 814 if (error) { 815 brelse(bp); 816 EXT2_LOCK(ump); 817 return (0); 818 } 819 if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { 820 error = ext2_cg_block_bitmap_init(fs, cg, bp); 821 if (error) { 822 brelse(bp); 823 EXT2_LOCK(ump); 824 return (0); 825 } 826 } 827 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) { 828 /* 829 * Another thread allocated the last block in this 830 * group while we were waiting for the buffer. 
831 */ 832 brelse(bp); 833 EXT2_LOCK(ump); 834 return (0); 835 } 836 bbp = (char *)bp->b_data; 837 838 if (dtog(fs, bpref) != cg) 839 bpref = 0; 840 if (bpref != 0) { 841 bpref = dtogd(fs, bpref); 842 /* 843 * if the requested block is available, use it 844 */ 845 if (isclr(bbp, bpref)) { 846 bno = bpref; 847 goto gotit; 848 } 849 } 850 /* 851 * no blocks in the requested cylinder, so take next 852 * available one in this cylinder group. 853 * first try to get 8 contigous blocks, then fall back to a single 854 * block. 855 */ 856 if (bpref) 857 start = dtogd(fs, bpref) / NBBY; 858 else 859 start = 0; 860 end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; 861 retry: 862 runlen = 0; 863 runstart = 0; 864 for (loc = start; loc < end; loc++) { 865 if (bbp[loc] == (char)0xff) { 866 runlen = 0; 867 continue; 868 } 869 870 /* Start of a run, find the number of high clear bits. */ 871 if (runlen == 0) { 872 bit = fls(bbp[loc]); 873 runlen = NBBY - bit; 874 runstart = loc * NBBY + bit; 875 } else if (bbp[loc] == 0) { 876 /* Continue a run. */ 877 runlen += NBBY; 878 } else { 879 /* 880 * Finish the current run. If it isn't long 881 * enough, start a new one. 882 */ 883 bit = ffs(bbp[loc]) - 1; 884 runlen += bit; 885 if (runlen >= 8) { 886 bno = runstart; 887 goto gotit; 888 } 889 890 /* Run was too short, start a new one. */ 891 bit = fls(bbp[loc]); 892 runlen = NBBY - bit; 893 runstart = loc * NBBY + bit; 894 } 895 896 /* If the current run is long enough, use it. 
*/ 897 if (runlen >= 8) { 898 bno = runstart; 899 goto gotit; 900 } 901 } 902 if (start != 0) { 903 end = start; 904 start = 0; 905 goto retry; 906 } 907 bno = ext2_mapsearch(fs, bbp, bpref); 908 if (bno < 0) { 909 brelse(bp); 910 EXT2_LOCK(ump); 911 return (0); 912 } 913 gotit: 914 #ifdef INVARIANTS 915 if (isset(bbp, bno)) { 916 printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n", 917 cg, (intmax_t)bno, fs->e2fs_fsmnt); 918 panic("ext2fs_alloccg: dup alloc"); 919 } 920 #endif 921 setbit(bbp, bno); 922 EXT2_LOCK(ump); 923 ext2_clusteracct(fs, bbp, cg, bno, -1); 924 fs->e2fs->e2fs_fbcount--; 925 fs->e2fs_gd[cg].ext2bgd_nbfree--; 926 fs->e2fs_fmod = 1; 927 EXT2_UNLOCK(ump); 928 bdwrite(bp); 929 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); 930 } 931 932 /* 933 * Determine whether a cluster can be allocated. 934 */ 935 static daddr_t 936 ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len) 937 { 938 struct m_ext2fs *fs; 939 struct ext2mount *ump; 940 struct buf *bp; 941 char *bbp; 942 int bit, error, got, i, loc, run; 943 int32_t *lp; 944 daddr_t bno; 945 946 fs = ip->i_e2fs; 947 ump = ip->i_ump; 948 949 if (fs->e2fs_maxcluster[cg] < len) 950 return (0); 951 952 EXT2_UNLOCK(ump); 953 error = bread(ip->i_devvp, 954 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), 955 (int)fs->e2fs_bsize, NOCRED, &bp); 956 if (error) 957 goto fail_lock; 958 959 bbp = (char *)bp->b_data; 960 EXT2_LOCK(ump); 961 /* 962 * Check to see if a cluster of the needed size (or bigger) is 963 * available in this cylinder group. 964 */ 965 lp = &fs->e2fs_clustersum[cg].cs_sum[len]; 966 for (i = len; i <= fs->e2fs_contigsumsize; i++) 967 if (*lp++ > 0) 968 break; 969 if (i > fs->e2fs_contigsumsize) { 970 /* 971 * Update the cluster summary information to reflect 972 * the true maximum-sized cluster so that future cluster 973 * allocation requests can avoid reading the bitmap only 974 * to find no cluster. 
		 */
		lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
		/* Recompute the largest cluster length recorded for this group. */
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->e2fs_maxcluster[cg] = i;
		goto fail;
	}
	EXT2_UNLOCK(ump);

	/* Search the bitmap to find a big enough cluster like in FFS. */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0)
		bpref = dtogd(fs, bpref);
	loc = bpref / NBBY;
	bit = 1 << (bpref % NBBY);
	/*
	 * Scan bits LSB-first within each byte; "run" counts consecutive
	 * clear (free) bits and the loop stops once it reaches "len".
	 */
	for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
		if ((bbp[loc] & bit) != 0)
			run = 0;
		else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;
			bit = 1;
		}
	}

	if (got >= fs->e2fs->e2fs_fpg)
		goto fail_lock;

	/* Allocate the cluster that we found. */
	for (i = 1; i < len; i++)
		if (!isclr(bbp, got - run + i))
			panic("ext2_clusteralloc: map mismatch");

	bno = got - run + 1;
	if (bno >= fs->e2fs->e2fs_fpg)
		panic("ext2_clusteralloc: allocated out of group");

	/* Mark the blocks allocated and update the per-group accounting. */
	EXT2_LOCK(ump);
	for (i = 0; i < len; i += fs->e2fs_fpb) {
		setbit(bbp, bno + i);
		ext2_clusteracct(fs, bbp, cg, bno + i, -1);
		fs->e2fs->e2fs_fbcount--;
		fs->e2fs_gd[cg].ext2bgd_nbfree--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);

	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
	EXT2_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

/*
 * Zero the unused portion of cylinder group cg's on-disk inode table and
 * mark the group EXT2_BG_INODE_ZEROED.  A no-op if the group is already
 * marked zeroed.  Returns 0 on success or EIO if a buffer cannot be
 * obtained.
 */
static int
ext2_zero_inode_table(struct inode *ip, int cg)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int i, all_blks, used_blks;

	fs = ip->i_e2fs;

	if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED)
		return (0);

	/* Total blocks occupied by this group's inode table. */
	all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg /
	    fs->e2fs_bsize;

	/* Blocks that hold inodes already handed out (ipg - unused). */
	used_blks = howmany(fs->e2fs->e2fs_ipg -
	    fs->e2fs_gd[cg].ext4bgd_i_unused,
	    fs->e2fs_bsize / EXT2_INODE_SIZE(fs));

	/* Zero every table block past the used region and write it out. */
	for (i = 0; i < all_blks - used_blks; i++) {
		bp = getblk(ip->i_devvp, fsbtodb(fs,
		    fs->e2fs_gd[cg].ext2bgd_i_tables + used_blks + i),
		    fs->e2fs_bsize, 0, 0, 0);
		if (!bp)
			return (EIO);

		vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize);
		bawrite(bp);
	}

	fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED;

	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it in the specified cylinder group.
 * Called with the ext2mount lock held; the lock is dropped around the
 * bitmap I/O and re-taken before the counters are updated.
 * Returns the allocated inode number (1-based) or 0 on failure.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int error, start, len;
	char *ibp, *loc;

	ipref--;	/* to avoid a lot of (ipref -1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	/* Drop the lock across the bitmap read. */
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) {
		/*
		 * An uninitialized group's inode bitmap is not valid on
		 * disk: clear the in-core copy, drop the UNINIT flag and
		 * make sure the inode table itself has been zeroed.
		 */
		if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) {
			memset(bp->b_data, 0, fs->e2fs_bsize);
			fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT;
		}
		error = ext2_zero_inode_table(ip, cg);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
		/*
		 * Another thread allocated the last i-node in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		/* Try the preferred inode first. */
		ipref %= fs->e2fs->e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	/*
	 * Find the first byte with a clear bit at or after the preference,
	 * wrapping around to the start of the map if necessary.
	 */
	start = ipref / NBBY;
	len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
	loc = memcchr(&ibp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&ibp[start], 0xff, len);
		if (loc == NULL) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
			    cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	/* ffs(~byte) yields the 1-based index of the lowest clear bit. */
	ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
	setbit(ibp, ipref);
	EXT2_LOCK(ump);
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM))
		fs->e2fs_gd[cg].ext4bgd_i_unused--;
	fs->e2fs->e2fs_ficount--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		/* Directories are counted per group and filesystem-wide. */
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
		fs->e2fs_total_dir++;
	}
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	/* Inode numbers are 1-based, hence the +1. */
	return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
}

/*
 * Free a block or fragment.
1167 * 1168 */ 1169 void 1170 ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size) 1171 { 1172 struct m_ext2fs *fs; 1173 struct buf *bp; 1174 struct ext2mount *ump; 1175 int cg, error; 1176 char *bbp; 1177 1178 fs = ip->i_e2fs; 1179 ump = ip->i_ump; 1180 cg = dtog(fs, bno); 1181 if ((u_int)bno >= fs->e2fs->e2fs_bcount) { 1182 printf("bad block %lld, ino %ju\n", (long long)bno, 1183 (uintmax_t)ip->i_number); 1184 ext2_fserr(fs, ip->i_uid, "bad block"); 1185 return; 1186 } 1187 error = bread(ip->i_devvp, 1188 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), 1189 (int)fs->e2fs_bsize, NOCRED, &bp); 1190 if (error) { 1191 brelse(bp); 1192 return; 1193 } 1194 bbp = (char *)bp->b_data; 1195 bno = dtogd(fs, bno); 1196 if (isclr(bbp, bno)) { 1197 printf("block = %lld, fs = %s\n", 1198 (long long)bno, fs->e2fs_fsmnt); 1199 panic("ext2_blkfree: freeing free block"); 1200 } 1201 clrbit(bbp, bno); 1202 EXT2_LOCK(ump); 1203 ext2_clusteracct(fs, bbp, cg, bno, 1); 1204 fs->e2fs->e2fs_fbcount++; 1205 fs->e2fs_gd[cg].ext2bgd_nbfree++; 1206 fs->e2fs_fmod = 1; 1207 EXT2_UNLOCK(ump); 1208 bdwrite(bp); 1209 } 1210 1211 /* 1212 * Free an inode. 
1213 * 1214 */ 1215 int 1216 ext2_vfree(struct vnode *pvp, ino_t ino, int mode) 1217 { 1218 struct m_ext2fs *fs; 1219 struct inode *pip; 1220 struct buf *bp; 1221 struct ext2mount *ump; 1222 int error, cg; 1223 char *ibp; 1224 1225 pip = VTOI(pvp); 1226 fs = pip->i_e2fs; 1227 ump = pip->i_ump; 1228 if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount) 1229 panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s", 1230 pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt); 1231 1232 cg = ino_to_cg(fs, ino); 1233 error = bread(pip->i_devvp, 1234 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap), 1235 (int)fs->e2fs_bsize, NOCRED, &bp); 1236 if (error) { 1237 brelse(bp); 1238 return (0); 1239 } 1240 ibp = (char *)bp->b_data; 1241 ino = (ino - 1) % fs->e2fs->e2fs_ipg; 1242 if (isclr(ibp, ino)) { 1243 printf("ino = %llu, fs = %s\n", 1244 (unsigned long long)ino, fs->e2fs_fsmnt); 1245 if (fs->e2fs_ronly == 0) 1246 panic("ext2_vfree: freeing free inode"); 1247 } 1248 clrbit(ibp, ino); 1249 EXT2_LOCK(ump); 1250 fs->e2fs->e2fs_ficount++; 1251 fs->e2fs_gd[cg].ext2bgd_nifree++; 1252 if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) 1253 fs->e2fs_gd[cg].ext4bgd_i_unused++; 1254 if ((mode & IFMT) == IFDIR) { 1255 fs->e2fs_gd[cg].ext2bgd_ndirs--; 1256 fs->e2fs_total_dir--; 1257 } 1258 fs->e2fs_fmod = 1; 1259 EXT2_UNLOCK(ump); 1260 bdwrite(bp); 1261 return (0); 1262 } 1263 1264 /* 1265 * Find a block in the specified cylinder group. 1266 * 1267 * It is a panic if a request is made to find a block if none are 1268 * available. 
 */
static daddr_t
ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
	char *loc;
	int start, len;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
	/* First byte at/after start that is not 0xff, i.e. has a free bit. */
	loc = memcchr(&bbp[start], 0xff, len);
	if (loc == NULL) {
		/* Wrap around and scan the part of the map we skipped. */
		len = start + 1;
		start = 0;
		loc = memcchr(&bbp[start], 0xff, len);
		if (loc == NULL) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->e2fs_fsmnt);
			panic("ext2_mapsearch: map corrupted");
			/* NOTREACHED */
		}
	}
	/* ffs(~byte) gives the 1-based index of the lowest clear bit. */
	return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
{

	log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
}

/*
 * Return 1 if cylinder group cg keeps a backup copy of the superblock,
 * 0 otherwise.
 */
int
ext2_cg_has_sb(struct m_ext2fs *fs, int cg)
{
	int a3, a5, a7;

	/* Group 0 always holds the primary superblock. */
	if (cg == 0)
		return (1);)