15978408cSSascha Wildner /* $NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $ */ 25978408cSSascha Wildner /* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */ 35978408cSSascha Wildner 45978408cSSascha Wildner /*- 55978408cSSascha Wildner * SPDX-License-Identifier: BSD-3-Clause 65978408cSSascha Wildner * 75978408cSSascha Wildner * Copyright (c) 2002 Networks Associates Technology, Inc. 85978408cSSascha Wildner * All rights reserved. 95978408cSSascha Wildner * 105978408cSSascha Wildner * This software was developed for the FreeBSD Project by Marshall 115978408cSSascha Wildner * Kirk McKusick and Network Associates Laboratories, the Security 125978408cSSascha Wildner * Research Division of Network Associates, Inc. under DARPA/SPAWAR 135978408cSSascha Wildner * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS 145978408cSSascha Wildner * research program 155978408cSSascha Wildner * 165978408cSSascha Wildner * Copyright (c) 1982, 1986, 1989, 1993 175978408cSSascha Wildner * The Regents of the University of California. All rights reserved. 185978408cSSascha Wildner * 195978408cSSascha Wildner * Redistribution and use in source and binary forms, with or without 205978408cSSascha Wildner * modification, are permitted provided that the following conditions 215978408cSSascha Wildner * are met: 225978408cSSascha Wildner * 1. Redistributions of source code must retain the above copyright 235978408cSSascha Wildner * notice, this list of conditions and the following disclaimer. 245978408cSSascha Wildner * 2. Redistributions in binary form must reproduce the above copyright 255978408cSSascha Wildner * notice, this list of conditions and the following disclaimer in the 265978408cSSascha Wildner * documentation and/or other materials provided with the distribution. 275978408cSSascha Wildner * 3. 
Neither the name of the University nor the names of its contributors 285978408cSSascha Wildner * may be used to endorse or promote products derived from this software 295978408cSSascha Wildner * without specific prior written permission. 305978408cSSascha Wildner * 315978408cSSascha Wildner * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 325978408cSSascha Wildner * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 335978408cSSascha Wildner * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 345978408cSSascha Wildner * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 355978408cSSascha Wildner * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 365978408cSSascha Wildner * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 375978408cSSascha Wildner * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 385978408cSSascha Wildner * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 395978408cSSascha Wildner * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 405978408cSSascha Wildner * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 415978408cSSascha Wildner * SUCH DAMAGE. 
425978408cSSascha Wildner * 435978408cSSascha Wildner * @(#)ffs_alloc.c 8.19 (Berkeley) 7/13/95 44811c2036SSascha Wildner * $FreeBSD: head/usr.sbin/makefs/ffs/ffs_alloc.c 336736 2018-07-26 13:33:10Z emaste $ 455978408cSSascha Wildner */ 465978408cSSascha Wildner 475978408cSSascha Wildner #include <sys/param.h> 485978408cSSascha Wildner #include <sys/time.h> 495978408cSSascha Wildner 505978408cSSascha Wildner #include <errno.h> 515978408cSSascha Wildner #include <stdint.h> 525978408cSSascha Wildner 535978408cSSascha Wildner #include "makefs.h" 545978408cSSascha Wildner 55*346b9dadSzrj #include <vfs/ufs/dinode.h> 56*346b9dadSzrj #include <vfs/ufs/fs.h> 575978408cSSascha Wildner 585978408cSSascha Wildner #include "ffs/ufs_bswap.h" 595978408cSSascha Wildner #include "ffs/buf.h" 605978408cSSascha Wildner #include "ffs/ufs_inode.h" 615978408cSSascha Wildner #include "ffs/ffs_extern.h" 625978408cSSascha Wildner 63811c2036SSascha Wildner #include "ffs.h" /* XXX swildner: for compat defines */ 64811c2036SSascha Wildner 655978408cSSascha Wildner static int scanc(u_int, const u_char *, const u_char *, int); 665978408cSSascha Wildner 67811c2036SSascha Wildner static makefs_daddr_t ffs_alloccg(struct inode *, int, makefs_daddr_t, int); 68811c2036SSascha Wildner static makefs_daddr_t ffs_alloccgblk(struct inode *, struct buf *, makefs_daddr_t); 69811c2036SSascha Wildner static makefs_daddr_t ffs_hashalloc(struct inode *, u_int, makefs_daddr_t, int, 70811c2036SSascha Wildner makefs_daddr_t (*)(struct inode *, int, makefs_daddr_t, int)); 71811c2036SSascha Wildner static int32_t ffs_mapsearch(struct fs *, struct cg *, makefs_daddr_t, int); 725978408cSSascha Wildner 735978408cSSascha Wildner /* 745978408cSSascha Wildner * Allocate a block in the file system. 755978408cSSascha Wildner * 765978408cSSascha Wildner * The size of the requested block is given, which must be some 775978408cSSascha Wildner * multiple of fs_fsize and <= fs_bsize. 
785978408cSSascha Wildner * A preference may be optionally specified. If a preference is given 795978408cSSascha Wildner * the following hierarchy is used to allocate a block: 805978408cSSascha Wildner * 1) allocate the requested block. 815978408cSSascha Wildner * 2) allocate a rotationally optimal block in the same cylinder. 825978408cSSascha Wildner * 3) allocate a block in the same cylinder group. 835978408cSSascha Wildner * 4) quadradically rehash into other cylinder groups, until an 845978408cSSascha Wildner * available block is located. 855978408cSSascha Wildner * If no block preference is given the following hierarchy is used 865978408cSSascha Wildner * to allocate a block: 875978408cSSascha Wildner * 1) allocate a block in the cylinder group that contains the 885978408cSSascha Wildner * inode for the file. 895978408cSSascha Wildner * 2) quadradically rehash into other cylinder groups, until an 905978408cSSascha Wildner * available block is located. 915978408cSSascha Wildner */ 925978408cSSascha Wildner int 93811c2036SSascha Wildner ffs_alloc(struct inode *ip, makefs_daddr_t lbn __unused, makefs_daddr_t bpref, int size, 94811c2036SSascha Wildner makefs_daddr_t *bnp) 955978408cSSascha Wildner { 965978408cSSascha Wildner struct fs *fs = ip->i_fs; 97811c2036SSascha Wildner makefs_daddr_t bno; 985978408cSSascha Wildner int cg; 995978408cSSascha Wildner 1005978408cSSascha Wildner *bnp = 0; 1015978408cSSascha Wildner if (size > fs->fs_bsize || fragoff(fs, size) != 0) { 1025978408cSSascha Wildner errx(1, "ffs_alloc: bad size: bsize %d size %d", 1035978408cSSascha Wildner fs->fs_bsize, size); 1045978408cSSascha Wildner } 1055978408cSSascha Wildner if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 1065978408cSSascha Wildner goto nospace; 1075978408cSSascha Wildner if (bpref >= fs->fs_size) 1085978408cSSascha Wildner bpref = 0; 1095978408cSSascha Wildner if (bpref == 0) 1105978408cSSascha Wildner cg = ino_to_cg(fs, ip->i_number); 1115978408cSSascha Wildner 
else 1125978408cSSascha Wildner cg = dtog(fs, bpref); 1135978408cSSascha Wildner bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg); 1145978408cSSascha Wildner if (bno > 0) { 1155978408cSSascha Wildner if (ip->i_fs->fs_magic == FS_UFS1_MAGIC) 1165978408cSSascha Wildner ip->i_ffs1_blocks += size / DEV_BSIZE; 117811c2036SSascha Wildner #ifndef __DragonFly__ /* XXX UFS2 */ 1185978408cSSascha Wildner else 1195978408cSSascha Wildner ip->i_ffs2_blocks += size / DEV_BSIZE; 120811c2036SSascha Wildner #endif 1215978408cSSascha Wildner *bnp = bno; 1225978408cSSascha Wildner return (0); 1235978408cSSascha Wildner } 1245978408cSSascha Wildner nospace: 1255978408cSSascha Wildner return (ENOSPC); 1265978408cSSascha Wildner } 1275978408cSSascha Wildner 1285978408cSSascha Wildner /* 1295978408cSSascha Wildner * Select the desired position for the next block in a file. The file is 1305978408cSSascha Wildner * logically divided into sections. The first section is composed of the 1315978408cSSascha Wildner * direct blocks. Each additional section contains fs_maxbpg blocks. 1325978408cSSascha Wildner * 1335978408cSSascha Wildner * If no blocks have been allocated in the first section, the policy is to 1345978408cSSascha Wildner * request a block in the same cylinder group as the inode that describes 1355978408cSSascha Wildner * the file. If no blocks have been allocated in any other section, the 1365978408cSSascha Wildner * policy is to place the section in a cylinder group with a greater than 1375978408cSSascha Wildner * average number of free blocks. An appropriate cylinder group is found 1385978408cSSascha Wildner * by using a rotor that sweeps the cylinder groups. When a new group of 1395978408cSSascha Wildner * blocks is needed, the sweep begins in the cylinder group following the 1405978408cSSascha Wildner * cylinder group from which the previous allocation was made. 
The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
makefs_daddr_t
ffs_blkpref_ufs1(struct inode *ip, makefs_daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	/*
	 * First block of a section, or the previous slot in the block map
	 * is unallocated: pick a cylinder group instead of extending a run.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/* Early file blocks stay in the inode's own cylinder group. */
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep forward from startcg, then wrap from group 0. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

#ifndef __DragonFly__ /* XXX UFS2 */
/* UFS2 variant of the above; identical policy over 64-bit block pointers. */
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep forward from startcg, then wrap from group 0. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
#endif

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadradically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size': size for data blocks, mode for inodes
 */
/*VARARGS5*/
static makefs_daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, makefs_daddr_t pref, int size,
    makefs_daddr_t (*allocator)(struct inode *, int, makefs_daddr_t, int))
{
	struct fs *fs;
	makefs_daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	/* All cylinder groups exhausted. */
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static makefs_daddr_t
ffs_alloccg(struct inode *ip, int cg, makefs_daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	makefs_daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/* Quick reject from the in-core summary before reading the cg. */
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NULL, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	/* Re-validate against the on-disk cylinder group itself. */
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		/* Whole-block request: hand off to the block allocator. */
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		/* Release the tail fragments of the freshly carved block. */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		/* The leftover tail forms one free run of length i. */
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	/* A free run of >= allocsiz fragments exists; claim frags of it. */
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	/* One run of allocsiz is consumed; any remainder is a new run. */
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
3825978408cSSascha Wildner */ 383811c2036SSascha Wildner static makefs_daddr_t 384811c2036SSascha Wildner ffs_alloccgblk(struct inode *ip, struct buf *bp, makefs_daddr_t bpref) 3855978408cSSascha Wildner { 3865978408cSSascha Wildner struct cg *cgp; 387811c2036SSascha Wildner makefs_daddr_t blkno; 3885978408cSSascha Wildner int32_t bno; 3895978408cSSascha Wildner struct fs *fs = ip->i_fs; 3905978408cSSascha Wildner const int needswap = UFS_FSNEEDSWAP(fs); 3915978408cSSascha Wildner u_int8_t *blksfree_swap; 3925978408cSSascha Wildner 3935978408cSSascha Wildner cgp = (struct cg *)bp->b_data; 3945978408cSSascha Wildner blksfree_swap = cg_blksfree_swap(cgp, needswap); 3955978408cSSascha Wildner if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) { 3965978408cSSascha Wildner bpref = ufs_rw32(cgp->cg_rotor, needswap); 3975978408cSSascha Wildner } else { 3985978408cSSascha Wildner bpref = blknum(fs, bpref); 3995978408cSSascha Wildner bno = dtogd(fs, bpref); 4005978408cSSascha Wildner /* 4015978408cSSascha Wildner * if the requested block is available, use it 4025978408cSSascha Wildner */ 4035978408cSSascha Wildner if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno))) 4045978408cSSascha Wildner goto gotit; 4055978408cSSascha Wildner } 4065978408cSSascha Wildner /* 4075978408cSSascha Wildner * Take the next available one in this cylinder group. 
4085978408cSSascha Wildner */ 4095978408cSSascha Wildner bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 4105978408cSSascha Wildner if (bno < 0) 4115978408cSSascha Wildner return (0); 4125978408cSSascha Wildner cgp->cg_rotor = ufs_rw32(bno, needswap); 4135978408cSSascha Wildner gotit: 4145978408cSSascha Wildner blkno = fragstoblks(fs, bno); 4155978408cSSascha Wildner ffs_clrblock(fs, blksfree_swap, (long)blkno); 4165978408cSSascha Wildner ffs_clusteracct(fs, cgp, blkno, -1); 4175978408cSSascha Wildner ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap); 4185978408cSSascha Wildner fs->fs_cstotal.cs_nbfree--; 4195978408cSSascha Wildner fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--; 420811c2036SSascha Wildner #ifdef __DragonFly__ /* XXX swildner: our fsck checks these */ 421811c2036SSascha Wildner cg_blks(fs, cgp, cbtocylno(fs, bno))[cbtorpos(fs, bno)]--; 422811c2036SSascha Wildner cg_blktot(cgp)[cbtocylno(fs, bno)]--; 423811c2036SSascha Wildner #endif 4245978408cSSascha Wildner fs->fs_fmod = 1; 4255978408cSSascha Wildner blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno; 4265978408cSSascha Wildner return (blkno); 4275978408cSSascha Wildner } 4285978408cSSascha Wildner 4295978408cSSascha Wildner /* 4305978408cSSascha Wildner * Free a block or fragment. 4315978408cSSascha Wildner * 4325978408cSSascha Wildner * The specified block or fragment is placed back in the 4335978408cSSascha Wildner * free map. If a fragment is deallocated, a possible 4345978408cSSascha Wildner * block reassembly is checked. 
 */
void
ffs_blkfree(struct inode *ip, makefs_daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/* The freed range must be fragment-aligned and lie in one block. */
	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NULL, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/* Freeing a whole block: flip it in the block free map. */
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
#ifdef __DragonFly__ /* XXX swildner: our fsck checks these */
		cg_blks(fs, cgp, cbtocylno(fs, bno))[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[cbtocylno(fs, bno)]++;
#endif
	} else {
		/* Freeing fragments within the block starting at bbase. */
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
#ifdef __DragonFly__ /* XXX swildner: our fsck checks these */
			cg_blks(fs, cgp,
			    cbtocylno(fs, bbase))[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[cbtocylno(fs, bbase)]++;
#endif
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}


/*
 * Scan cp[0..size) for the first byte whose table entry has a bit in
 * common with mask; return the number of bytes remaining from that byte
 * to the end (0 if no byte matches).
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find
a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, makefs_daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	/* First pass: scan from the starting byte to the end of the map. */
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		/* Second pass: wrap and scan from the beginning to start. */
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			/* Summary promised a run, the map has none: corrupt. */
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
6165978408cSSascha Wildner */ 6175978408cSSascha Wildner void 6185978408cSSascha Wildner ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt) 6195978408cSSascha Wildner { 6205978408cSSascha Wildner int32_t *sump; 6215978408cSSascha Wildner int32_t *lp; 6225978408cSSascha Wildner u_char *freemapp, *mapp; 6235978408cSSascha Wildner int i, start, end, forw, back, map, bit; 6245978408cSSascha Wildner const int needswap = UFS_FSNEEDSWAP(fs); 6255978408cSSascha Wildner 6265978408cSSascha Wildner if (fs->fs_contigsumsize <= 0) 6275978408cSSascha Wildner return; 6285978408cSSascha Wildner freemapp = cg_clustersfree_swap(cgp, needswap); 6295978408cSSascha Wildner sump = cg_clustersum_swap(cgp, needswap); 6305978408cSSascha Wildner /* 6315978408cSSascha Wildner * Allocate or clear the actual block. 6325978408cSSascha Wildner */ 6335978408cSSascha Wildner if (cnt > 0) 6345978408cSSascha Wildner setbit(freemapp, blkno); 6355978408cSSascha Wildner else 6365978408cSSascha Wildner clrbit(freemapp, blkno); 6375978408cSSascha Wildner /* 6385978408cSSascha Wildner * Find the size of the cluster going forward. 
6395978408cSSascha Wildner */ 6405978408cSSascha Wildner start = blkno + 1; 6415978408cSSascha Wildner end = start + fs->fs_contigsumsize; 6425978408cSSascha Wildner if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap)) 6435978408cSSascha Wildner end = ufs_rw32(cgp->cg_nclusterblks, needswap); 6445978408cSSascha Wildner mapp = &freemapp[start / NBBY]; 6455978408cSSascha Wildner map = *mapp++; 6465978408cSSascha Wildner bit = 1 << (start % NBBY); 6475978408cSSascha Wildner for (i = start; i < end; i++) { 6485978408cSSascha Wildner if ((map & bit) == 0) 6495978408cSSascha Wildner break; 6505978408cSSascha Wildner if ((i & (NBBY - 1)) != (NBBY - 1)) { 6515978408cSSascha Wildner bit <<= 1; 6525978408cSSascha Wildner } else { 6535978408cSSascha Wildner map = *mapp++; 6545978408cSSascha Wildner bit = 1; 6555978408cSSascha Wildner } 6565978408cSSascha Wildner } 6575978408cSSascha Wildner forw = i - start; 6585978408cSSascha Wildner /* 6595978408cSSascha Wildner * Find the size of the cluster going backward. 
6605978408cSSascha Wildner */ 6615978408cSSascha Wildner start = blkno - 1; 6625978408cSSascha Wildner end = start - fs->fs_contigsumsize; 6635978408cSSascha Wildner if (end < 0) 6645978408cSSascha Wildner end = -1; 6655978408cSSascha Wildner mapp = &freemapp[start / NBBY]; 6665978408cSSascha Wildner map = *mapp--; 6675978408cSSascha Wildner bit = 1 << (start % NBBY); 6685978408cSSascha Wildner for (i = start; i > end; i--) { 6695978408cSSascha Wildner if ((map & bit) == 0) 6705978408cSSascha Wildner break; 6715978408cSSascha Wildner if ((i & (NBBY - 1)) != 0) { 6725978408cSSascha Wildner bit >>= 1; 6735978408cSSascha Wildner } else { 6745978408cSSascha Wildner map = *mapp--; 6755978408cSSascha Wildner bit = 1 << (NBBY - 1); 6765978408cSSascha Wildner } 6775978408cSSascha Wildner } 6785978408cSSascha Wildner back = start - i; 6795978408cSSascha Wildner /* 6805978408cSSascha Wildner * Account for old cluster and the possibly new forward and 6815978408cSSascha Wildner * back clusters. 6825978408cSSascha Wildner */ 6835978408cSSascha Wildner i = back + forw + 1; 6845978408cSSascha Wildner if (i > fs->fs_contigsumsize) 6855978408cSSascha Wildner i = fs->fs_contigsumsize; 6865978408cSSascha Wildner ufs_add32(sump[i], cnt, needswap); 6875978408cSSascha Wildner if (back > 0) 6885978408cSSascha Wildner ufs_add32(sump[back], -cnt, needswap); 6895978408cSSascha Wildner if (forw > 0) 6905978408cSSascha Wildner ufs_add32(sump[forw], -cnt, needswap); 6915978408cSSascha Wildner 6925978408cSSascha Wildner /* 6935978408cSSascha Wildner * Update cluster summary information. 6945978408cSSascha Wildner */ 6955978408cSSascha Wildner lp = &sump[fs->fs_contigsumsize]; 6965978408cSSascha Wildner for (i = fs->fs_contigsumsize; i > 0; i--) 6975978408cSSascha Wildner if (ufs_rw32(*lp--, needswap) > 0) 6985978408cSSascha Wildner break; 6995978408cSSascha Wildner fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i; 7005978408cSSascha Wildner } 701