1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * %sccs.include.redist.c% 6 * 7 * @(#)ffs_inode.c 8.12 (Berkeley) 03/30/95 8 */ 9 10 #include <sys/param.h> 11 #include <sys/systm.h> 12 #include <sys/mount.h> 13 #include <sys/proc.h> 14 #include <sys/file.h> 15 #include <sys/buf.h> 16 #include <sys/vnode.h> 17 #include <sys/kernel.h> 18 #include <sys/malloc.h> 19 #include <sys/trace.h> 20 #include <sys/resourcevar.h> 21 22 #include <vm/vm.h> 23 24 #include <ufs/ufs/quota.h> 25 #include <ufs/ufs/inode.h> 26 #include <ufs/ufs/ufsmount.h> 27 #include <ufs/ufs/ufs_extern.h> 28 29 #include <ufs/ffs/fs.h> 30 #include <ufs/ffs/ffs_extern.h> 31 32 static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t, 33 ufs_daddr_t, int, long *)); 34 35 /* 36 * Update the access, modified, and inode change times as specified by the 37 * IACCESS, IUPDATE, and ICHANGE flags respectively. The IMODIFIED flag is 38 * used to specify that the inode needs to be updated but that the times have 39 * already been set. The access and modified times are taken from the second 40 * and third parameters; the inode change time is always taken from the current 41 * time. If waitfor is set, then wait for the disk write of the inode to 42 * complete. 
43 */ 44 int 45 ffs_update(ap) 46 struct vop_update_args /* { 47 struct vnode *a_vp; 48 struct timeval *a_access; 49 struct timeval *a_modify; 50 int a_waitfor; 51 } */ *ap; 52 { 53 register struct fs *fs; 54 struct buf *bp; 55 struct inode *ip; 56 int error; 57 58 ip = VTOI(ap->a_vp); 59 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) { 60 ip->i_flag &= 61 ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); 62 return (0); 63 } 64 if ((ip->i_flag & 65 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) 66 return (0); 67 if (ip->i_flag & IN_ACCESS) 68 ip->i_atime = ap->a_access->tv_sec; 69 if (ip->i_flag & IN_UPDATE) { 70 ip->i_mtime = ap->a_modify->tv_sec; 71 ip->i_modrev++; 72 } 73 if (ip->i_flag & IN_CHANGE) 74 ip->i_ctime = time.tv_sec; 75 ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); 76 fs = ip->i_fs; 77 /* 78 * Ensure that uid and gid are correct. This is a temporary 79 * fix until fsck has been changed to do the update. 80 */ 81 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ 82 ip->i_din.di_ouid = ip->i_uid; /* XXX */ 83 ip->i_din.di_ogid = ip->i_gid; /* XXX */ 84 } /* XXX */ 85 if (error = bread(ip->i_devvp, 86 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 87 (int)fs->fs_bsize, NOCRED, &bp)) { 88 brelse(bp); 89 return (error); 90 } 91 *((struct dinode *)bp->b_data + 92 ino_to_fsbo(fs, ip->i_number)) = ip->i_din; 93 if (ap->a_waitfor && (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0) 94 return (bwrite(bp)); 95 else { 96 bdwrite(bp); 97 return (0); 98 } 99 } 100 101 #define SINGLE 0 /* index of single indirect block */ 102 #define DOUBLE 1 /* index of double indirect block */ 103 #define TRIPLE 2 /* index of triple indirect block */ 104 /* 105 * Truncate the inode oip to at most length size, freeing the 106 * disk blocks. 
107 */ 108 ffs_truncate(ap) 109 struct vop_truncate_args /* { 110 struct vnode *a_vp; 111 off_t a_length; 112 int a_flags; 113 struct ucred *a_cred; 114 struct proc *a_p; 115 } */ *ap; 116 { 117 register struct vnode *ovp = ap->a_vp; 118 ufs_daddr_t lastblock; 119 register struct inode *oip; 120 ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; 121 ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; 122 off_t length = ap->a_length; 123 register struct fs *fs; 124 struct buf *bp; 125 int offset, size, level; 126 long count, nblocks, vflags, blocksreleased = 0; 127 struct timeval tv; 128 register int i; 129 int aflags, error, allerror; 130 off_t osize; 131 132 if (length < 0) 133 return (EINVAL); 134 oip = VTOI(ovp); 135 tv = time; 136 if (ovp->v_type == VLNK && 137 oip->i_size < ovp->v_mount->mnt_maxsymlinklen) { 138 #ifdef DIAGNOSTIC 139 if (length != 0) 140 panic("ffs_truncate: partial truncate of symlink"); 141 #endif 142 bzero((char *)&oip->i_shortlink, (u_int)oip->i_size); 143 oip->i_size = 0; 144 oip->i_flag |= IN_CHANGE | IN_UPDATE; 145 return (VOP_UPDATE(ovp, &tv, &tv, 1)); 146 } 147 if (oip->i_size == length) { 148 oip->i_flag |= IN_CHANGE | IN_UPDATE; 149 return (VOP_UPDATE(ovp, &tv, &tv, 0)); 150 } 151 #ifdef QUOTA 152 if (error = getinoquota(oip)) 153 return (error); 154 #endif 155 vnode_pager_setsize(ovp, (u_long)length); 156 fs = oip->i_fs; 157 osize = oip->i_size; 158 /* 159 * Lengthen the size of the file. We must ensure that the 160 * last byte of the file is allocated. Since the smallest 161 * value of osize is 0, length will be at least 1. 
162 */ 163 if (osize < length) { 164 if (length > fs->fs_maxfilesize) 165 return (EFBIG); 166 offset = blkoff(fs, length - 1); 167 lbn = lblkno(fs, length - 1); 168 aflags = B_CLRBUF; 169 if (ap->a_flags & IO_SYNC) 170 aflags |= B_SYNC; 171 if (error = ffs_balloc(oip, lbn, offset + 1, ap->a_cred, &bp, 172 aflags)) 173 return (error); 174 oip->i_size = length; 175 (void) vnode_pager_uncache(ovp); 176 if (aflags & B_SYNC) 177 bwrite(bp); 178 else 179 bawrite(bp); 180 oip->i_flag |= IN_CHANGE | IN_UPDATE; 181 return (VOP_UPDATE(ovp, &tv, &tv, 1)); 182 } 183 /* 184 * Shorten the size of the file. If the file is not being 185 * truncated to a block boundry, the contents of the 186 * partial block following the end of the file must be 187 * zero'ed in case it ever become accessable again because 188 * of subsequent file growth. 189 */ 190 offset = blkoff(fs, length); 191 if (offset == 0) { 192 oip->i_size = length; 193 } else { 194 lbn = lblkno(fs, length); 195 aflags = B_CLRBUF; 196 if (ap->a_flags & IO_SYNC) 197 aflags |= B_SYNC; 198 if (error = ffs_balloc(oip, lbn, offset, ap->a_cred, &bp, 199 aflags)) 200 return (error); 201 oip->i_size = length; 202 size = blksize(fs, oip, lbn); 203 (void) vnode_pager_uncache(ovp); 204 bzero((char *)bp->b_data + offset, (u_int)(size - offset)); 205 allocbuf(bp, size); 206 if (aflags & B_SYNC) 207 bwrite(bp); 208 else 209 bawrite(bp); 210 } 211 /* 212 * Calculate index into inode's block list of 213 * last direct and indirect blocks (if any) 214 * which we want to keep. Lastblock is -1 when 215 * the file is truncated to 0. 216 */ 217 lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; 218 lastiblock[SINGLE] = lastblock - NDADDR; 219 lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); 220 lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); 221 nblocks = btodb(fs->fs_bsize); 222 /* 223 * Update file and block pointers on disk before we start freeing 224 * blocks. 
If we crash before free'ing blocks below, the blocks 225 * will be returned to the free list. lastiblock values are also 226 * normalized to -1 for calls to ffs_indirtrunc below. 227 */ 228 bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks); 229 for (level = TRIPLE; level >= SINGLE; level--) 230 if (lastiblock[level] < 0) { 231 oip->i_ib[level] = 0; 232 lastiblock[level] = -1; 233 } 234 for (i = NDADDR - 1; i > lastblock; i--) 235 oip->i_db[i] = 0; 236 oip->i_flag |= IN_CHANGE | IN_UPDATE; 237 if (error = VOP_UPDATE(ovp, &tv, &tv, MNT_WAIT)) 238 allerror = error; 239 /* 240 * Having written the new inode to disk, save its new configuration 241 * and put back the old block pointers long enough to process them. 242 * Note that we save the new block configuration so we can check it 243 * when we are done. 244 */ 245 bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks); 246 bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks); 247 oip->i_size = osize; 248 vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA; 249 allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0); 250 251 /* 252 * Indirect blocks first. 253 */ 254 indir_lbn[SINGLE] = -NDADDR; 255 indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; 256 indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; 257 for (level = TRIPLE; level >= SINGLE; level--) { 258 bn = oip->i_ib[level]; 259 if (bn != 0) { 260 error = ffs_indirtrunc(oip, indir_lbn[level], 261 fsbtodb(fs, bn), lastiblock[level], level, &count); 262 if (error) 263 allerror = error; 264 blocksreleased += count; 265 if (lastiblock[level] < 0) { 266 oip->i_ib[level] = 0; 267 ffs_blkfree(oip, bn, fs->fs_bsize); 268 blocksreleased += nblocks; 269 } 270 } 271 if (lastiblock[level] >= 0) 272 goto done; 273 } 274 275 /* 276 * All whole direct blocks or frags. 
277 */ 278 for (i = NDADDR - 1; i > lastblock; i--) { 279 register long bsize; 280 281 bn = oip->i_db[i]; 282 if (bn == 0) 283 continue; 284 oip->i_db[i] = 0; 285 bsize = blksize(fs, oip, i); 286 ffs_blkfree(oip, bn, bsize); 287 blocksreleased += btodb(bsize); 288 } 289 if (lastblock < 0) 290 goto done; 291 292 /* 293 * Finally, look for a change in size of the 294 * last direct block; release any frags. 295 */ 296 bn = oip->i_db[lastblock]; 297 if (bn != 0) { 298 long oldspace, newspace; 299 300 /* 301 * Calculate amount of space we're giving 302 * back as old block size minus new block size. 303 */ 304 oldspace = blksize(fs, oip, lastblock); 305 oip->i_size = length; 306 newspace = blksize(fs, oip, lastblock); 307 if (newspace == 0) 308 panic("itrunc: newspace"); 309 if (oldspace - newspace > 0) { 310 /* 311 * Block number of space to be free'd is 312 * the old block # plus the number of frags 313 * required for the storage we're keeping. 314 */ 315 bn += numfrags(fs, newspace); 316 ffs_blkfree(oip, bn, oldspace - newspace); 317 blocksreleased += btodb(oldspace - newspace); 318 } 319 } 320 done: 321 #ifdef DIAGNOSTIC 322 for (level = SINGLE; level <= TRIPLE; level++) 323 if (newblks[NDADDR + level] != oip->i_ib[level]) 324 panic("itrunc1"); 325 for (i = 0; i < NDADDR; i++) 326 if (newblks[i] != oip->i_db[i]) 327 panic("itrunc2"); 328 if (length == 0 && 329 (ovp->v_dirtyblkhd.lh_first || ovp->v_cleanblkhd.lh_first)) 330 panic("itrunc3"); 331 #endif /* DIAGNOSTIC */ 332 /* 333 * Put back the real size. 334 */ 335 oip->i_size = length; 336 oip->i_blocks -= blocksreleased; 337 if (oip->i_blocks < 0) /* sanity */ 338 oip->i_blocks = 0; 339 oip->i_flag |= IN_CHANGE; 340 #ifdef QUOTA 341 (void) chkdq(oip, -blocksreleased, NOCRED, 0); 342 #endif 343 return (allerror); 344 } 345 346 /* 347 * Release blocks associated with the inode ip and stored in the indirect 348 * block bn. Blocks are free'd in LIFO order up to (but not including) 349 * lastbn. 
If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	/*
	 * Parameters:
	 *	ip	- inode whose blocks are being released.
	 *	lbn	- logical block number of this indirect block
	 *		  (negative pseudo-lbn, per indir_lbn in
	 *		  ffs_truncate).
	 *	dbn	- on-disk (device) address of this indirect block.
	 *	lastbn	- last logical block to keep; -1 means free the
	 *		  entire block.
	 *	level	- SINGLE, DOUBLE, or TRIPLE indirection.
	 *	countp	- out: count of released disk (DEV_BSIZE) blocks.
	 */
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	/* factor = NINDIR(fs)^(level - SINGLE): blocks spanned per entry. */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
	} else {
		/* Cache miss: issue the read ourselves using dbn. */
		trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	/*
	 * Save a private copy of the block pointers, then zero the
	 * entries being freed and push the cleaned block to disk before
	 * any of the referenced blocks are released.
	 */
	bap = (ufs_daddr_t *)bp->b_data;
	MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
	/* Whole block going away: no need to keep it cached. */
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	/* Work from the saved copy; the buffer has been written out. */
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		/* Index within the child block of the last entry kept. */
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}