1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * @(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_inode.c,v 1.56.2.5 2002/02/05 18:35:03 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_inode.c,v 1.12 2004/08/24 14:01:57 drhodus Exp $
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include "quota.h"
#include "ufsmount.h"
#include "inode.h"
#include "ufs_extern.h"

#include "fs.h"
#include "ffs_extern.h"

#include <vm/vm_page2.h>

/* Free blocks reachable through the indirect block at device block dbn. */
static int ffs_indirtrunc (struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 *
 * vp      - vnode of the inode to push to disk
 * waitfor - non-zero to force the inode out now and wait for the write
 *
 * Returns 0 on success or an errno from bread()/bwrite().
 */
int
ffs_update(struct vnode *vp, int waitfor)
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	/* Fold pending IN_ACCESS/IN_UPDATE/IN_CHANGE into the timestamps. */
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	/* Clear the flags before the write; a failure is not re-flagged. */
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	/* Nothing can be pushed to a read-only filesystem. */
	if (fs->fs_ronly)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	/* Read the filesystem block that holds this inode's on-disk copy. */
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	/* Copy the in-core dinode into its slot in the inode block. */
	*((struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (waitfor && !DOINGASYNC(vp)) {
		/* Caller wants the inode on disk before we return. */
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		/* Under memory/buffer pressure, push it out synchronously. */
		return (bwrite(bp));
	} else {
		/* Otherwise a delayed write suffices; allow clustering. */
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define SINGLE	0	/* index of single indirect block */
#define DOUBLE	1	/* index of double indirect block */
#define TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 *
 * vp     - vnode of the file being truncated (also referenced as ovp below)
 * length - new end-of-file; must be >= 0 and <= fs_maxfilesize
 * flags  - IO_SYNC forces synchronous writes of allocated blocks
 * cred   - credentials passed to VOP_BALLOC for block allocation
 * td     - thread used for VOP_FSYNC / buffer invalidation
 */
int
ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
	     struct thread *td)
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Short symlinks keep their target text inside the inode itself
	 * (i_shortlink) rather than in data blocks; just erase the text.
	 */
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif /* DIAGNOSTIC */
		bzero((char *)&oip->i_shortlink, (uint)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/* No size change: just refresh the timestamps. */
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	/* Reset the vnode's sequential write-clustering state. */
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, MNT_WAIT, td)) != 0)
				return (error);
		} else {
			/*
			 * Truncation to zero: let softdep free everything
			 * in the background and toss all cached buffers.
			 */
#ifdef QUOTA
			(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, td, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		/* Allocate (and zero, via B_CLRBUF) the block holding byte length-1. */
		error = VOP_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, MNT_WAIT, td)) != 0) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		/* Zero from the new EOF to the end of the (fragment-sized) block. */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (uint)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks. If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	/*
	 * NOTE(review): the bcopy of sizeof(oldblks) bytes starting at
	 * i_db[0] captures both i_db[] and i_ib[] -- this assumes the two
	 * arrays are contiguous in the dinode, which the DIAGNOSTIC checks
	 * below (newblks[NDADDR + level] vs i_ib[level]) also rely on.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	/* Synchronously push the truncated pointer set to disk. */
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	/* Throw away cached buffers beyond the new length. */
	error = vtruncbuf(ovp, td, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			/* Entire indirect tree gone: free the indirect block too. */
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		/* A partially-kept level means nothing below it is freed. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	/* The in-core pointers must match what we wrote to disk above. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	/* Truncation to zero must leave no buffers on the vnode. */
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.
Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * ip     - inode whose blocks are being released
 * lbn    - logical block number of this indirect block
 * dbn    - device (on-disk) block number of this indirect block
 * lastbn - last logical block to be kept; -1 releases the whole block
 * level  - SINGLE, DOUBLE or TRIPLE indirection
 * countp - out: number of device blocks released
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t dbn,
	       ufs_daddr_t lastbn, int level, long *countp)
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept. -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	/* factor = number of data blocks addressed per entry at this level */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first. Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail. However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* Not cached: issue the read ourselves with the known dbn. */
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp->b_vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		/*
		 * Partial truncation: work from a private copy of the
		 * pointers while the zeroed original is written to disk
		 * first (so a crash cannot leave references to freed
		 * blocks).  bap aliases the copy from here on.
		 */
		MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (uint)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (uint)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	/* Walk entries from the end down to (not including) index last. */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		/* i == last after the loop above; bap[i] is the boundary entry. */
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		/* Whole block freed: make sure the stale buffer is discarded. */
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}