1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 *
 * @(#)ffs_inode.c	8.13 (Berkeley) 4/21/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_inode.c,v 1.56.2.5 2002/02/05 18:35:03 dillon Exp $
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include "quota.h"
#include "ufsmount.h"
#include "inode.h"
#include "ufs_extern.h"

#include "fs.h"
#include "ffs_extern.h"

#include <vm/vm_page2.h>
#include <sys/buf2.h>

static int ffs_indirtrunc (struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *);

/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 *
 *	vp	- vnode of the inode to update
 *	waitfor	- non-zero forces a write even if IN_MODIFIED is clear and,
 *		  unless the mount is async, makes that write synchronous
 *
 * Returns 0 on success (including the no-write short-circuit paths) or an
 * errno from bread()/bwrite().
 */
int
ffs_update(struct vnode *vp, int waitfor)
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	/*
	 * Clear the modified bits up front; they reflect the presumably
	 * successful write performed below.
	 */
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);

	/*
	 * The vnode type is usually set to VBAD if an unrecoverable I/O
	 * error has occured (such as when reading the inode).  Clear the
	 * modified bits but do not write anything out in this case.
	 */
	if (vp->v_type == VBAD)
		return (0);
	/*
	 * Ensure that uid and gid are correct.  This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	/*
	 * Read the filesystem block holding this inode's on-disk dinode,
	 * copy the in-core dinode into its slot within that block, and
	 * write the block back out.
	 */
	error = bread(ip->i_devvp,
		      fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
		      (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct ufs1_dinode *)bp->b_data +
	  ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	/*
	 * Write synchronously when the caller asked for it (and the mount
	 * is not fully async), or when memory / dirty-buffer pressure is
	 * severe.  Otherwise a delayed write suffices.
	 */
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 *
 *	vp	- vnode of the file to truncate (or extend)
 *	length	- new size in bytes; must be >= 0 and <= fs_maxfilesize
 *	flags	- IO_SYNC requests synchronous writes of allocated blocks
 *	cred	- credentials passed through to VOP_BALLOC()
 *
 * Returns 0 on success or an errno; the first error encountered is
 * accumulated in allerror and returned after freeing proceeds as far
 * as possible.
 */
int
ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Short symlinks store their target in the inode itself
	 * (i_shortlink) rather than in data blocks; just clear it.
	 */
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif /* DIAGNOSTIC */
		bzero((char *)&oip->i_shortlink, (uint)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = ufs_getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point.  Finding and deallocating those structures
			 * is a lot of work.  Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
			/*
			 * Truncation to zero under softdep: let the softdep
			 * code take over freeing the blocks.
			 */
#ifdef QUOTA
			(void) ufs_chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, 0, 0);
			nvnode_pager_setsize(ovp, 0, fs->fs_bsize, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;

	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 *
	 * nvextendbuf() only breads the old buffer.  The blocksize
	 * of the new buffer must be specified so it knows how large
	 * to make the VM object.
	 */
	if (osize < length) {
		nvextendbuf(vp, osize, length,
			    blkoffsize(fs, oip, osize),	/* oblksize */
			    blkoffresize(fs, length),	/* nblksize */
			    blkoff(fs, osize),
			    blkoff(fs, length),
			    0);

		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		/* BALLOC will reallocate the fragment at the old EOF */
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}

	/*
	 * Shorten the size of the file.
	 *
	 * NOTE: The block size specified in nvtruncbuf() is the blocksize
	 *	 of the buffer containing length prior to any reallocation
	 *	 of the block.
	 */
	allerror = nvtruncbuf(ovp, length, blkoffsize(fs, oip, length),
			      blkoff(fs, length), 0);
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		/*
		 * The new EOF falls inside a block; reallocate that block
		 * down to a fragment as needed.
		 */
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);

		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 *
		 * nvtruncbuf() may have re-dirtied the underlying block
		 * as part of its truncation zeroing code.  To avoid a
		 * 'locking against myself' panic in the second fsync we
		 * can simply undirty the bp since the redirtying was
		 * related to areas of the buffer that we are going to
		 * throw away anyway, and we will b*write() the remainder
		 * anyway down below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize) {
			bundirty(bp);
			error = VOP_FSYNC(ovp, MNT_WAIT, 0);
			if (error) {
				bdwrite(bp);
				return (error);
			}
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
#if 0
		/* remove - nvtruncbuf deals with this */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			      (uint)(size - offset));
#endif
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ffs_update(ovp, 1);
	if (error && allerror == 0)
		allerror = error;

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	/*
	 * NOTE(review): this repeats the identical error fold performed
	 * just above (error is not modified in between); harmless but
	 * redundant.
	 */
	if (error && allerror == 0)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 && !RB_EMPTY(&ovp->v_rbdirty_tree))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) ufs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 *	ip	- inode whose indirect block is being pruned
 *	lbn	- logical block number of this indirect block in the file
 *	dbn	- device block number of the indirect block (used directly
 *		  for the raw read since bmap cannot be used here)
 *	lastbn	- last entry to keep; -1 frees the entire block
 *	level	- SINGLE, DOUBLE or TRIPLE
 *	countp	- out: number of device blocks released
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t dbn,
	       ufs_daddr_t lastbn, int level, long *countp)
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy = NULL, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the bio_offset field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lblktodoff(fs, lbn), (int)fs->fs_bsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		/*
		 * BIO is bio2 which chains back to bio1.  We wait
		 * on bio1.
		 */
		bp->b_bio2.bio_offset = dbtodoff(fs, dbn);
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vfs_busy_pages(vp, bp);
		/*
		 * Access the block device layer using the device vnode
		 * and the translated block number (bio2) instead of the
		 * file vnode (vp) and logical block number (bio1).
		 *
		 * Even though we are bypassing the vnode layer, we still
		 * want the vnode state to indicate that an I/O on its behalf
		 * is in progress.
		 */
		bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
		vn_strategy(ip->i_devvp, &bp->b_bio2);
		error = biowait(&bp->b_bio1, "biord");
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	if (lastbn != -1) {
		/*
		 * Some entries survive: work from a private copy of the
		 * pointer array, zero the doomed tail in the buffer, and
		 * push the trimmed indirect block out before freeing any
		 * of the blocks it referenced.
		 */
		copy = kmalloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy((caddr_t)bap, (caddr_t)copy, (uint)fs->fs_bsize);
		bzero((caddr_t)&bap[last + 1],
		    (uint)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
		if (DOINGASYNC(vp)) {
			bawrite(bp);
		} else {
			error = bwrite(bp);
			if (error)
				allerror = error;
		}
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	/*
	 * If we worked from a copy the buffer was already written; free
	 * the copy.  Otherwise toss the (fully obsolete) indirect block
	 * from the cache.
	 */
	if (copy != NULL) {
		kfree(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL | B_NOCACHE;
		brelse(bp);
	}

	*countp = blocksreleased;
	return (allerror);
}