xref: /original-bsd/sys/ufs/ffs/ffs_inode.c (revision 812242a7)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_inode.c	7.70 (Berkeley) 04/17/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t, daddr_t, int,
	    long *));

int
ffs_init()
{
	return (ufs_init());
}

/*
 * Update the access, modified, and inode change times as specified
 * by the IACC, IUPD, and ICHG flags respectively. The IMOD flag
 * is used to specify that the inode needs to be updated but that
 * the times have already been set. The access and modified times
 * are taken from the second and third parameters; the inode change
 * time is always taken from the current time. If waitfor is set,
 * then wait for the disk write of the inode to complete.
 */
int
ffs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_ta;
		struct timeval *a_tm;
		int a_waitfor;
	} */ *ap;
{
	struct buf *bp;
	struct inode *ip;
	struct dinode *dp;
	register struct fs *fs;
	int error;

	ip = VTOI(ap->a_vp);
	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
		return (0);
	}
	if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
		return (0);
	if (ip->i_flag&IACC)
		ip->i_atime.ts_sec = ap->a_ta->tv_sec;
	if (ip->i_flag&IUPD) {
		ip->i_mtime.ts_sec = ap->a_tm->tv_sec;
		ip->i_modrev++;
	}
	if (ip->i_flag&ICHG)
		ip->i_ctime.ts_sec = time.tv_sec;
	ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
	fs = ip->i_fs;
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
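	/*
	 * Read the filesystem block that holds this inode, copy the
	 * in-core dinode into its slot, and write the block back,
	 * synchronously if the caller asked to wait for the update.
	 */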
	if (error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
	*dp = ip->i_din;
	if (ap->a_waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size.  Free affected disk
 * blocks -- the blocks of the file are removed in reverse order.
 */
int
ffs_truncate(ap)
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *ovp = ap->a_vp;
	register daddr_t lastblock;
	register struct inode *oip;
	daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	off_t length = ap->a_length;
	register struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, vflags, blocksreleased = 0;
	struct timeval tv;
	register int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	tv = time;
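	/*
	 * Short symbolic links are stored in the inode itself (i_shortlink)
	 * rather than in data blocks, so there are no blocks to release;
	 * just clear the link text and mark the inode for update.
	 */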
	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= ICHG|IUPD;
		return (VOP_UPDATE(ovp, &tv, &tv, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= ICHG|IUPD;
		return (VOP_UPDATE(ovp, &tv, &tv, 0));
	}
	vnode_pager_setsize(ovp, (u_long)length);
	/*
	 * Update the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	fs = oip->i_fs;
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0 && osize > length) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (ap->a_flags & IO_SYNC)
			aflags |= B_SYNC;
#ifdef QUOTA
		if (error = getinoquota(oip))
			return (error);
#endif
		if (error = ffs_balloc(oip, lbn, offset, ap->a_cred, &bp, aflags))
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		(void) vnode_pager_uncache(ovp);
		if (osize > length) {
			bzero(bp->b_un.b_addr + offset, (u_int)(size - offset));
			allocbuf(bp, size);
		}
		if (ap->a_flags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		if (osize < length) {
			oip->i_flag |= ICHG|IUPD;
			return (VOP_UPDATE(ovp, &tv, &tv, 1));
		}
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
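	/*
	 * For example, on an 8K block file system, truncating to 20000
	 * bytes gives lastblock = 2, so direct blocks 0-2 are retained;
	 * truncating to 0 gives lastblock = -1, which drives every
	 * lastiblock entry negative and releases all of the file's blocks.
	 */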
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
	if (error = VOP_UPDATE(ovp, &tv, &tv, MNT_WAIT))
		allerror = error;
	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;
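	/*
	 * Invalidate buffers for the blocks being released.  V_SAVE forces
	 * dirty data that is still part of the file out to disk first;
	 * V_SAVEMETA exempts the indirect block buffers from invalidation.
	 */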
	vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
	allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	if (length == 0 &&
	    (ovp->v_dirtyblkhd.le_next || ovp->v_cleanblkhd.le_next))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	if (!getinoquota(oip))
		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t lbn, lastbn;
	daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	struct vnode *vp;
	daddr_t *copy, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on disk copy first.  Since
	 * the double (triple) indirect pointers are cleared before the
	 * single (double) indirect blocks they reference are processed,
	 * calls to bmap on these blocks will fail.  However, we already
	 * have the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
	} else {
		trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = bp->b_un.b_daddr;
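	/*
	 * Save a private copy of the block pointers before the buffer is
	 * zeroed and written back, so the freeing loops below can still
	 * walk the old contents.
	 */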
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if (error = ffs_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (daddr_t)-1, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}