/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_balloc.c	8.7 (Berkeley) 05/17/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/vnode.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * Parameters:
 *	ip	- inode of the file being extended
 *	lbn	- logical block number within the file to allocate
 *	size	- size in bytes wanted at lbn (may be less than a full
 *		  block, in which case a fragment may be allocated)
 *	cred	- credentials charged for the allocation (quota/ffs_alloc)
 *	bpp	- out: on success *bpp holds a buffer for the block at lbn
 *	flags	- B_SYNC requests synchronous metadata writes;
 *		  B_CLRBUF requests that newly allocated data be zeroed
 *		  (or read in, for the indirect-block case)
 *
 * Returns 0 on success with *bpp valid, otherwise an errno.  On
 * failure partway through indirect-block allocation, all blocks
 * allocated by this call are released (see the "fail" label).
 */
ffs_balloc(ip, lbn, size, cred, bpp, flags)
	register struct inode *ip;
	register ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	struct buf **bpp;
	int flags;
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	/* Path of indirect blocks from the inode down to lbn. */
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	/*
	 * allocib remembers the inode indirect pointer slot filled by
	 * this call (for rollback); allociblk/allocblk record every
	 * other block allocated here so "fail" can free them.
	 */
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR];

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_fs;

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */
	/* nb is the logical block holding the current end of file. */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			/* Grow the trailing fragment to a full block. */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			ip->i_size = (nb + 1) * fs->fs_bsize;
			/* Keep the VM pager's idea of file size in sync. */
			vnode_pager_setsize(vp, (u_long)ip->i_size);
			ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & B_SYNC)
				bwrite(bp);
			else
				bawrite(bp);
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			/* Block already allocated at full size: just read it. */
			error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			*bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				/* Existing fragment is big enough. */
				error = bread(vp, lbn, osize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
			} else {
				/* Grow the fragment in place (or move it). */
				error = ffs_realloccg(ip, lbn,
				    ffs_blkpref(ip, lbn, (int)lbn,
					&ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
			}
		} else {
			/*
			 * No block yet: allocate a fragment if lbn is the
			 * last block of the file, otherwise a full block.
			 */
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lbn, nsize, 0, 0);
			bp->b_blkno = fsbtodb(fs, newb);
			if (flags & B_CLRBUF)
				clrbuf(bp);
		}
		ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return(error);
#ifdef DIAGNOSTIC
	if (num < 1)
		panic ("ffs_balloc: ufs_bmaparray returned indirect block\n");
#endif
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = 0;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
	        if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
		bp->b_blkno = fsbtodb(fs, newb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(bp)) {
			ffs_blkfree(ip, nb, fs->fs_bsize);
			return (error);
		}
		/* Publish the pointer only after the block is on disk. */
		allocib = &ip->i_ib[indirs[0].in_off];
		ip->i_ib[indirs[0].in_off] = newb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		/* At the last level, nb is the data block (or 0). */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		/* Record for rollback on later failure. */
		*allocblk++ = newb;
		nb = newb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(nbp)) {
			brelse(bp);
			goto fail;
		}
		/* Hook the new indirect block into its parent. */
		bap[indirs[i - 1].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		*allocblk++ = newb;
		nb = newb;
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		if (flags & B_CLRBUF)
			clrbuf(nbp);
		/* Enter the data block in the last indirect block. */
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		*bpp = nbp;
		return (0);
	}
	/* Data block already exists; release the indirect buffer. */
	brelse(bp);
	if (flags & B_CLRBUF) {
		/* Caller wants valid contents: read the existing block. */
		error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
		if (error) {
			brelse(nbp);
			goto fail;
		}
	} else {
		/* Caller will overwrite: a bare buffer suffices. */
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
	}
	*bpp = nbp;
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	if (allocib == 0) {
		deallocated = 0;
	} else {
		/* Undo the first-level indirect pointer set above. */
		ffs_blkfree(ip, *allocib, fs->fs_bsize);
		*allocib = 0;
		deallocated = fs->fs_bsize;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/* Free every other block recorded in allociblk. */
	for (blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (deallocated) {
#ifdef QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (long)-btodb(deallocated), cred, FORCE);
#endif
		ip->i_blocks -= btodb(deallocated);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}