/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_inode.c	7.45 (Berkeley) 12/19/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t, int, long *));

/* Monotonically increasing seed for inode generation numbers (shared FS-wide). */
extern u_long nextgennumber;

/*
 * Initialize the FFS layer.  Currently there is no FFS-private state to
 * set up, so this simply defers to the generic UFS initialization.
 */
int
ffs_init()
{
	return (ufs_init());
}

/*
 * Look up a UFS dinode number to find its incore vnode.
 * If it is not in core, read it in from the specified device.
 * If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount
 * points must be done by the calling routine.
 *
 * mntp - mount point the inode belongs to
 * ino  - inode number to look up
 * vpp  - out: the resulting (locked) vnode, or NULL on error
 */
ffs_vget(mntp, ino, vpp)
	struct mount *mntp;
	ino_t ino;
	struct vnode **vpp;
{
	register struct fs *fs;
	register struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct dinode *dp;
	struct vnode *vp;
	union ihead *ih;	/* NOTE(review): declared but unused in this routine */
	dev_t dev;
	int i, type, error;

	ump = VFSTOUFS(mntp);
	dev = ump->um_dev;
	/* Fast path: inode already in core; the hash lookup returns it locked. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if (error = getnewvnode(VT_UFS, mntp, &ffs_vnodeops, &vp)) {
		*vpp = NULL;
		return (error);
	}
	/* Choose the malloc type by backing store (memory FS vs. disk FS). */
	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
	MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
	vp->v_data = ip;
	ip->i_vnode = vp;
	/* Clear the fields that must not carry garbage before the disk read. */
	ip->i_flag = 0;
	ip->i_devvp = 0;
	ip->i_mode = 0;
	ip->i_diroff = 0;
	ip->i_lockf = 0;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error = bread(ump->um_devvp, fsbtodb(fs, itod(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.  It will be
		 * returned to the free list by ufs_iput().
		 */
		remque(ip);
		/* Self-link so later queue operations on ip are harmless. */
		ip->i_forw = ip;
		ip->i_back = ip;

		/* Unlock and discard unneeded inode. */
		ufs_iput(ip);
		/* bread still returns a buffer on failure; release it. */
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	/* Locate this inode's dinode within the block and copy it in core. */
	dp = bp->b_un.b_dino;
	dp += itoo(fs, ino);
	ip->i_din = *dp;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (error = ufs_vinit(mntp, &ffs_specops, FFS_FIFOOPS, &vp)) {
		ufs_iput(ip);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one.  This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		/* Keep the seed ahead of the clock so numbers never repeat. */
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		/* Only mark for writeback if the filesystem is writable. */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IMOD;
	}
	*vpp = vp;
	return (0);
}

/*
 * Update the access, modified, and inode change times as specified
 * by the IACC, IUPD, and ICHG flags respectively.  The IMOD flag
 * is used to specify that the inode needs to be updated but that
 * the times have already been set.  The access and modified times
 * are taken from the second and third parameters; the inode change
 * time is always taken from the current time.  If waitfor is set,
 * then wait for the disk write of the inode to complete.
 */
int
ffs_update(vp, ta, tm, waitfor)
	register struct vnode *vp;
	struct timeval *ta, *tm;
	int waitfor;
{
	struct buf *bp;
	struct inode *ip;
	struct dinode *dp;
	register struct fs *fs;
	int error;

	/* Nothing may be written to a read-only filesystem. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	ip = VTOI(vp);
	/* No pending time/inode updates: nothing to do. */
	if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
		return (0);
	if (ip->i_flag&IACC)
		ip->i_atime = ta->tv_sec;
	if (ip->i_flag&IUPD) {
		ip->i_mtime = tm->tv_sec;
		INCRQUAD(ip->i_modrev);
	}
	if (ip->i_flag&ICHG)
		ip->i_ctime = time.tv_sec;
	ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);

	/* Read the block holding this dinode, overwrite our slot, write back. */
	fs = ip->i_fs;
	if (error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
	*dp = ip->i_din;
	if (waitfor)
		return (bwrite(bp));
	else {
		/* Delayed write: let the buffer cache schedule the I/O. */
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size.
 * Free affected disk blocks -- the blocks of the file are removed
 * in reverse order.
 *
 * NB: triple indirect blocks are untested.
 */
ffs_truncate(ovp, length, flags)
	register struct vnode *ovp;
	u_long length;
	int flags;
{
	register daddr_t lastblock;
	register struct inode *oip;
	daddr_t bn, lbn, lastiblock[NIADDR];
	register struct fs *fs;
	register struct inode *ip;
	struct buf *bp;
	int offset, osize, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	struct inode tip;	/* shadow copy used while freeing old blocks */

	vnode_pager_setsize(ovp, length);
	oip = VTOI(ovp);
	/* Not shrinking: just mark the times changed and flush the inode. */
	if (oip->i_size <= length) {
		oip->i_flag |= ICHG|IUPD;
		error = ffs_update(ovp, &time, &time, 1);
		return (error);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	fs = oip->i_fs;
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update the size of the file.  If the file is not being
	 * truncated to a block boundry, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever become accessable again because
	 * of subsequent file growth.
	 */
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
#ifdef QUOTA
		if (error = getinoquota(oip))
			return (error);
#endif
		/* Fetch (allocating if needed) the block straddling the new EOF. */
		if (error = ffs_balloc(oip, lbn, offset, &bp, aflags))
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		(void) vnode_pager_uncache(ITOV(oip));
		/* Zero from the new EOF to the end of the (fragment-sized) block. */
		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
		allocbuf(bp, size);
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	tip = *oip;
	tip.i_size = osize;	/* shadow keeps the old size for the free pass */
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
	vinvalbuf(ITOV(oip), (length > 0));
	allerror = ffs_update(ovp, &time, &time, MNT_WAIT);

	/*
	 * Indirect blocks first.
	 */
	ip = &tip;	/* free from the shadow, which still holds the old pointers */
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = ip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(ip,
			    bn, lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			/* Fully truncated at this level: free the indirect block too. */
			if (lastiblock[level] < 0) {
				ip->i_ib[level] = 0;
				ffs_blkfree(ip, bn, (off_t)fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		/* A partially-kept level means no lower level is affected. */
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register off_t bsize;

		bn = ip->i_db[i];
		if (bn == 0)
			continue;
		ip->i_db[i] = 0;
		bsize = (off_t)blksize(fs, ip, i);
		ffs_blkfree(ip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = ip->i_db[lastblock];
	if (bn != 0) {
		off_t oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
/* BEGIN PARANOIA */
	/* The free pass must not have diverged from the on-disk pointers. */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (ip->i_ib[level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (ip->i_db[i] != oip->i_db[i])
			panic("itrunc2");
/* END PARANOIA */
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	if (!getinoquota(oip))
		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
387 * 388 * NB: triple indirect blocks are untested. 389 */ 390 static int 391 ffs_indirtrunc(ip, bn, lastbn, level, countp) 392 register struct inode *ip; 393 daddr_t bn, lastbn; 394 int level; 395 long *countp; 396 { 397 register int i; 398 struct buf *bp; 399 register struct fs *fs = ip->i_fs; 400 register daddr_t *bap; 401 daddr_t *copy, nb, last; 402 long blkcount, factor; 403 int nblocks, blocksreleased = 0; 404 int error, allerror = 0; 405 406 /* 407 * Calculate index in current block of last 408 * block to be kept. -1 indicates the entire 409 * block so we need not calculate the index. 410 */ 411 factor = 1; 412 for (i = SINGLE; i < level; i++) 413 factor *= NINDIR(fs); 414 last = lastbn; 415 if (lastbn > 0) 416 last /= factor; 417 nblocks = btodb(fs->fs_bsize); 418 /* 419 * Get buffer of block pointers, zero those 420 * entries corresponding to blocks to be free'd, 421 * and update on disk copy first. 422 */ 423 error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize, 424 NOCRED, &bp); 425 if (error) { 426 brelse(bp); 427 *countp = 0; 428 return (error); 429 } 430 bap = bp->b_un.b_daddr; 431 MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK); 432 bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize); 433 bzero((caddr_t)&bap[last + 1], 434 (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t)); 435 if (last == -1) 436 bp->b_flags |= B_INVAL; 437 error = bwrite(bp); 438 if (error) 439 allerror = error; 440 bap = copy; 441 442 /* 443 * Recursively free totally unused blocks. 444 */ 445 for (i = NINDIR(fs) - 1; i > last; i--) { 446 nb = bap[i]; 447 if (nb == 0) 448 continue; 449 if (level > SINGLE) { 450 if (error = ffs_indirtrunc(ip, 451 nb, (daddr_t)-1, level - 1, &blkcount)) 452 allerror = error; 453 blocksreleased += blkcount; 454 } 455 ffs_blkfree(ip, nb, (off_t)fs->fs_bsize); 456 blocksreleased += nblocks; 457 } 458 459 /* 460 * Recursively free last partial block. 
461 */ 462 if (level > SINGLE && lastbn >= 0) { 463 last = lastbn % factor; 464 nb = bap[i]; 465 if (nb != 0) { 466 if (error = 467 ffs_indirtrunc(ip, nb, last, level - 1, &blkcount)) 468 allerror = error; 469 blocksreleased += blkcount; 470 } 471 } 472 FREE(copy, M_TEMP); 473 *countp = blocksreleased; 474 return (allerror); 475 } 476