xref: /original-bsd/sys/ufs/ffs/ffs_inode.c (revision 5f97f134)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)ffs_inode.c	7.68 (Berkeley) 02/28/93
8  */
9 
10 #include <sys/param.h>
11 #include <sys/systm.h>
12 #include <sys/mount.h>
13 #include <sys/proc.h>
14 #include <sys/file.h>
15 #include <sys/buf.h>
16 #include <sys/vnode.h>
17 #include <sys/kernel.h>
18 #include <sys/malloc.h>
19 #include <sys/trace.h>
20 #include <sys/resourcevar.h>
21 
22 #include <vm/vm.h>
23 
24 #include <ufs/ufs/quota.h>
25 #include <ufs/ufs/inode.h>
26 #include <ufs/ufs/ufsmount.h>
27 #include <ufs/ufs/ufs_extern.h>
28 
29 #include <ufs/ffs/fs.h>
30 #include <ufs/ffs/ffs_extern.h>
31 
32 static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t, daddr_t, int,
33 	    long *));
34 
/*
 * Filesystem initialization for FFS; all of the real work is
 * done by the filesystem-independent UFS initialization code.
 */
int
ffs_init()
{
	int error;

	error = ufs_init();
	return (error);
}
40 
41 /*
42  * Update the access, modified, and inode change times as specified
43  * by the IACC, IUPD, and ICHG flags respectively. The IMOD flag
44  * is used to specify that the inode needs to be updated but that
45  * the times have already been set. The access and modified times
46  * are taken from the second and third parameters; the inode change
47  * time is always taken from the current time. If waitfor is set,
48  * then wait for the disk write of the inode to complete.
49  */
int
ffs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_ta;
		struct timeval *a_tm;
		int a_waitfor;
	} */ *ap;
{
	struct buf *bp;
	struct inode *ip;
	struct dinode *dp;
	register struct fs *fs;
	int error;

	ip = VTOI(ap->a_vp);
	/*
	 * On a read-only mount nothing may be written back; discard the
	 * pending time/modification flags and report success.
	 */
	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
		return (0);
	}
	/* Nothing pending: avoid the disk I/O entirely. */
	if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
		return (0);
	/* Access time comes from the caller-supplied a_ta. */
	if (ip->i_flag&IACC)
		ip->i_atime.ts_sec = ap->a_ta->tv_sec;
	/* Modification time from a_tm; bump i_modrev so NFS et al. can
	 * detect that the file data changed. */
	if (ip->i_flag&IUPD) {
		ip->i_mtime.ts_sec = ap->a_tm->tv_sec;
		ip->i_modrev++;
	}
	/* Inode-change time is always the current time. */
	if (ip->i_flag&ICHG)
		ip->i_ctime.ts_sec = time.tv_sec;
	/* Clear the flags before the write so updates arriving during a
	 * synchronous bwrite below are not lost. */
	ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
	fs = ip->i_fs;
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	/*
	 * Read the filesystem block that holds this inode, copy the
	 * in-core dinode into its slot, and write the block back.
	 */
	if (error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
	*dp = ip->i_din;
	/* Synchronous write if the caller asked to wait; otherwise a
	 * delayed write suffices. */
	if (ap->a_waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
104 
105 #define	SINGLE	0	/* index of single indirect block */
106 #define	DOUBLE	1	/* index of double indirect block */
107 #define	TRIPLE	2	/* index of triple indirect block */
108 /*
109  * Truncate the inode oip to at most length size.  Free affected disk
110  * blocks -- the blocks of the file are removed in reverse order.
111  */
112 ffs_truncate(ap)
113 	struct vop_truncate_args /* {
114 		struct vnode *a_vp;
115 		off_t a_length;
116 		int a_flags;
117 		struct ucred *a_cred;
118 		struct proc *a_p;
119 	} */ *ap;
120 {
121 	register struct vnode *ovp = ap->a_vp;
122 	register daddr_t lastblock;
123 	register struct inode *oip;
124 	daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
125 	daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
126 	off_t length = ap->a_length;
127 	register struct fs *fs;
128 	struct buf *bp;
129 	int offset, size, level;
130 	long count, nblocks, vflags, blocksreleased = 0;
131 	struct timeval tv;
132 	register int i;
133 	int aflags, error, allerror;
134 	off_t osize;
135 
136 	oip = VTOI(ovp);
137 	tv = time;
138 	if (ovp->v_type == VLNK &&
139 	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
140 #ifdef DIAGNOSTIC
141 		if (length != 0)
142 			panic("ffs_truncate: partial truncate of symlink");
143 #endif
144 		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
145 		oip->i_size = 0;
146 		oip->i_flag |= ICHG|IUPD;
147 		return (VOP_UPDATE(ovp, &tv, &tv, 1));
148 	}
149 	if (oip->i_size <= length) {
150 		oip->i_flag |= ICHG|IUPD;
151 		return (VOP_UPDATE(ovp, &tv, &tv, 1));
152 	}
153 	vnode_pager_setsize(ovp, (u_long)length);
154 	/*
155 	 * Calculate index into inode's block list of
156 	 * last direct and indirect blocks (if any)
157 	 * which we want to keep.  Lastblock is -1 when
158 	 * the file is truncated to 0.
159 	 */
160 	fs = oip->i_fs;
161 	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
162 	lastiblock[SINGLE] = lastblock - NDADDR;
163 	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
164 	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
165 	nblocks = btodb(fs->fs_bsize);
166 	/*
167 	 * Update the size of the file. If the file is not being
168 	 * truncated to a block boundry, the contents of the
169 	 * partial block following the end of the file must be
170 	 * zero'ed in case it ever become accessable again because
171 	 * of subsequent file growth.
172 	 */
173 	osize = oip->i_size;
174 	offset = blkoff(fs, length);
175 	if (offset == 0) {
176 		oip->i_size = length;
177 	} else {
178 		lbn = lblkno(fs, length);
179 		aflags = B_CLRBUF;
180 		if (ap->a_flags & IO_SYNC)
181 			aflags |= B_SYNC;
182 #ifdef QUOTA
183 		if (error = getinoquota(oip))
184 			return (error);
185 #endif
186 		if (error = ffs_balloc(oip, lbn, offset, ap->a_cred, &bp, aflags))
187 			return (error);
188 		oip->i_size = length;
189 		size = blksize(fs, oip, lbn);
190 		(void) vnode_pager_uncache(ovp);
191 		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
192 		allocbuf(bp, size);
193 		if (ap->a_flags & IO_SYNC)
194 			bwrite(bp);
195 		else
196 			bawrite(bp);
197 	}
198 	/*
199 	 * Update file and block pointers on disk before we start freeing
200 	 * blocks.  If we crash before free'ing blocks below, the blocks
201 	 * will be returned to the free list.  lastiblock values are also
202 	 * normalized to -1 for calls to ffs_indirtrunc below.
203 	 */
204 	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
205 	for (level = TRIPLE; level >= SINGLE; level--)
206 		if (lastiblock[level] < 0) {
207 			oip->i_ib[level] = 0;
208 			lastiblock[level] = -1;
209 		}
210 	for (i = NDADDR - 1; i > lastblock; i--)
211 		oip->i_db[i] = 0;
212 	oip->i_flag |= ICHG|IUPD;
213 	if (error = VOP_UPDATE(ovp, &tv, &tv, MNT_WAIT))
214 		allerror = error;
215 	/*
216 	 * Having written the new inode to disk, save its new configuration
217 	 * and put back the old block pointers long enough to process them.
218 	 * Note that we save the new block configuration so we can check it
219 	 * when we are done.
220 	 */
221 	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
222 	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
223 	oip->i_size = osize;
224 	vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
225 	allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0);
226 
227 	/*
228 	 * Indirect blocks first.
229 	 */
230 	indir_lbn[SINGLE] = -NDADDR;
231 	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
232 	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
233 	for (level = TRIPLE; level >= SINGLE; level--) {
234 		bn = oip->i_ib[level];
235 		if (bn != 0) {
236 			error = ffs_indirtrunc(oip, indir_lbn[level],
237 			    fsbtodb(fs, bn), lastiblock[level], level, &count);
238 			if (error)
239 				allerror = error;
240 			blocksreleased += count;
241 			if (lastiblock[level] < 0) {
242 				oip->i_ib[level] = 0;
243 				ffs_blkfree(oip, bn, fs->fs_bsize);
244 				blocksreleased += nblocks;
245 			}
246 		}
247 		if (lastiblock[level] >= 0)
248 			goto done;
249 	}
250 
251 	/*
252 	 * All whole direct blocks or frags.
253 	 */
254 	for (i = NDADDR - 1; i > lastblock; i--) {
255 		register long bsize;
256 
257 		bn = oip->i_db[i];
258 		if (bn == 0)
259 			continue;
260 		oip->i_db[i] = 0;
261 		bsize = blksize(fs, oip, i);
262 		ffs_blkfree(oip, bn, bsize);
263 		blocksreleased += btodb(bsize);
264 	}
265 	if (lastblock < 0)
266 		goto done;
267 
268 	/*
269 	 * Finally, look for a change in size of the
270 	 * last direct block; release any frags.
271 	 */
272 	bn = oip->i_db[lastblock];
273 	if (bn != 0) {
274 		long oldspace, newspace;
275 
276 		/*
277 		 * Calculate amount of space we're giving
278 		 * back as old block size minus new block size.
279 		 */
280 		oldspace = blksize(fs, oip, lastblock);
281 		oip->i_size = length;
282 		newspace = blksize(fs, oip, lastblock);
283 		if (newspace == 0)
284 			panic("itrunc: newspace");
285 		if (oldspace - newspace > 0) {
286 			/*
287 			 * Block number of space to be free'd is
288 			 * the old block # plus the number of frags
289 			 * required for the storage we're keeping.
290 			 */
291 			bn += numfrags(fs, newspace);
292 			ffs_blkfree(oip, bn, oldspace - newspace);
293 			blocksreleased += btodb(oldspace - newspace);
294 		}
295 	}
296 done:
297 #ifdef DIAGNOSTIC
298 	for (level = SINGLE; level <= TRIPLE; level++)
299 		if (newblks[NDADDR + level] != oip->i_ib[level])
300 			panic("itrunc1");
301 	for (i = 0; i < NDADDR; i++)
302 		if (newblks[i] != oip->i_db[i])
303 			panic("itrunc2");
304 	if (length == 0 &&
305 	    (ovp->v_dirtyblkhd.le_next || ovp->v_cleanblkhd.le_next))
306 		panic("itrunc3");
307 #endif /* DIAGNOSTIC */
308 	/*
309 	 * Put back the real size.
310 	 */
311 	oip->i_size = length;
312 	oip->i_blocks -= blocksreleased;
313 	if (oip->i_blocks < 0)			/* sanity */
314 		oip->i_blocks = 0;
315 	oip->i_flag |= ICHG;
316 #ifdef QUOTA
317 	if (!getinoquota(oip))
318 		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
319 #endif
320 	return (allerror);
321 }
322 
323 /*
324  * Release blocks associated with the inode ip and stored in the indirect
325  * block bn.  Blocks are free'd in LIFO order up to (but not including)
326  * lastbn.  If level is greater than SINGLE, the block is an indirect block
327  * and recursive calls to indirtrunc must be used to cleanse other indirect
328  * blocks.
329  *
330  * NB: triple indirect blocks are untested.
331  */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t lbn, lastbn;
	daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	struct vnode *vp;
	daddr_t *copy, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	/* factor = NINDIR(fs)^(level-1): logical blocks spanned per entry
	 * at this indirection level. */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
	} else {
		/* Not cached: issue the read ourselves using the known
		 * on-disk address (dbn), as explained above. */
		trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	/*
	 * Save a private copy of the pointers, zero the tail of the
	 * on-disk copy, and write it back before freeing anything, so
	 * a crash cannot leave the inode referencing freed blocks.
	 */
	bap = bp->b_un.b_daddr;
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	/* Work from the saved copy from here on. */
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if (error = ffs_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (daddr_t)-1, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	/* NOTE: i and nlbn retain their values from the loop above; when
	 * the loop exits, i == last and nlbn addresses that entry. */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
438