1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 * 33 * @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95 34 * $FreeBSD: src/sys/ufs/ffs/ffs_inode.c,v 1.56.2.5 2002/02/05 18:35:03 dillon Exp $ 35 * $DragonFly: src/sys/vfs/ufs/ffs_inode.c,v 1.2 2003/06/17 04:28:59 dillon Exp $ 36 */ 37 38 #include "opt_quota.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/mount.h> 43 #include <sys/proc.h> 44 #include <sys/buf.h> 45 #include <sys/vnode.h> 46 #include <sys/kernel.h> 47 #include <sys/malloc.h> 48 #include <sys/resourcevar.h> 49 #include <sys/vmmeter.h> 50 51 #include <vm/vm.h> 52 #include <vm/vm_extern.h> 53 54 #include <ufs/ufs/quota.h> 55 #include <ufs/ufs/ufsmount.h> 56 #include <ufs/ufs/inode.h> 57 #include <ufs/ufs/ufs_extern.h> 58 59 #include <ufs/ffs/fs.h> 60 #include <ufs/ffs/ffs_extern.h> 61 62 static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t, 63 ufs_daddr_t, int, long *)); 64 65 /* 66 * Update the access, modified, and inode change times as specified by the 67 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode 68 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by 69 * the timestamp update). The IN_LAZYMOD flag is set to force a write 70 * later if not now. If we write now, then clear both IN_MODIFIED and 71 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is 72 * set, then wait for the write to complete. 73 */ 74 int 75 ffs_update(vp, waitfor) 76 struct vnode *vp; 77 int waitfor; 78 { 79 register struct fs *fs; 80 struct buf *bp; 81 struct inode *ip; 82 int error; 83 84 ufs_itimes(vp); 85 ip = VTOI(vp); 86 if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0) 87 return (0); 88 ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED); 89 fs = ip->i_fs; 90 if (fs->fs_ronly) 91 return (0); 92 /* 93 * Ensure that uid and gid are correct. This is a temporary 94 * fix until fsck has been changed to do the update. 
95 */ 96 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ 97 ip->i_din.di_ouid = ip->i_uid; /* XXX */ 98 ip->i_din.di_ogid = ip->i_gid; /* XXX */ 99 } /* XXX */ 100 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 101 (int)fs->fs_bsize, NOCRED, &bp); 102 if (error) { 103 brelse(bp); 104 return (error); 105 } 106 if (DOINGSOFTDEP(vp)) 107 softdep_update_inodeblock(ip, bp, waitfor); 108 else if (ip->i_effnlink != ip->i_nlink) 109 panic("ffs_update: bad link cnt"); 110 *((struct dinode *)bp->b_data + 111 ino_to_fsbo(fs, ip->i_number)) = ip->i_din; 112 if (waitfor && !DOINGASYNC(vp)) { 113 return (bwrite(bp)); 114 } else if (vm_page_count_severe() || buf_dirty_count_severe()) { 115 return (bwrite(bp)); 116 } else { 117 if (bp->b_bufsize == fs->fs_bsize) 118 bp->b_flags |= B_CLUSTEROK; 119 bdwrite(bp); 120 return (0); 121 } 122 } 123 124 #define SINGLE 0 /* index of single indirect block */ 125 #define DOUBLE 1 /* index of double indirect block */ 126 #define TRIPLE 2 /* index of triple indirect block */ 127 /* 128 * Truncate the inode oip to at most length size, freeing the 129 * disk blocks. 
 *
 * vp is the vnode being truncated, length the new size in bytes, flags
 * the IO_* flags (IO_SYNC forces synchronous allocation of the boundary
 * block), cred the credentials used for allocation and quota accounting,
 * and p the requesting process.  Returns 0 or an errno; errors hit while
 * freeing blocks are accumulated in allerror so the truncation proceeds
 * as far as possible before reporting the first failure.
 */
int
ffs_truncate(vp, length, flags, cred, p)
	struct vnode *vp;
	off_t length;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	register struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	register struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Short symlinks keep their target string in the inode itself
	 * (i_shortlink), so there are no disk blocks to release; just
	 * erase the in-inode data.  Only a full truncate makes sense.
	 */
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/* Size unchanged: only the timestamps need updating. */
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = getinoquota(oip);
	if (error)
		return (error);
#endif
	/* Reset this vnode's cluster-write tracking state. */
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
			    p)) != 0)
				return (error);
		} else {
			/*
			 * Truncation to zero under soft updates: hand the
			 * whole job of freeing the blocks to the softdep
			 * code and return early.
			 */
#ifdef QUOTA
			(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, cred, p, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(ovp, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1,
		    cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			return (error);
		}
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, p)) != 0) {
			return (error);
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (u_int)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep. Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks. If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list. lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	/* Synchronous write: the inode must hit disk before any frees. */
	allerror = UFS_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	/* Discard now-stale buffers beyond the new end of file. */
	error = vtruncbuf(ovp, cred, p, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			/* Whole chain gone: free the indirect block too. */
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		/* This level is partially kept; nothing lower is freed. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	/* The in-core pointers must match what we wrote to disk above. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 &&
	    (!TAILQ_EMPTY(&ovp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&ovp->v_cleanblkhd)))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.
 Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 *
 * ip is the inode being truncated; lbn is the (negative) logical block
 * number of this indirect block, following the indir_lbn convention set
 * up in ffs_truncate; dbn is its on-disk (device) block address; lastbn
 * is the last logical block to keep (-1 frees the entire block); level
 * is SINGLE, DOUBLE or TRIPLE.  The number of device blocks released is
 * returned through countp.  Returns 0 or the first errno encountered.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept. -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	/* factor = logical blocks addressed by one slot at this level */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);	/* device blocks per fs block */
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first. Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail. However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* Buffer not cached: issue the read ourselves with the
		 * known device address. */
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp->b_vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		/*
		 * Partial free: save a private copy of the pointer array,
		 * zero the tail being freed in the buffer, and push the
		 * updated indirect block to disk before releasing any
		 * blocks, so a crash cannot leave live pointers to freed
		 * blocks.  The frees below then work from the saved copy.
		 */
		MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;	/* residue to keep at next level */
		nb = bap[i];		/* loop above left i == last */
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		FREE(copy, M_TEMP);
	} else {
		/* Entire block freed: its buffer must not be reused. */
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}