/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_balloc.c,v 1.26.2.1 2002/10/10 19:48:20 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_balloc.c,v 1.19 2008/05/21 18:49:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/vnode.h>

#include "quota.h"
#include "inode.h"
#include "ufs_extern.h"

#include "fs.h"
#include "ffs_extern.h"

/*
 * Balloc defines the structure of filesystem storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * ffs_balloc(struct vnode *a_vp, off_t a_startoffset, int a_size,
 *	      struct ucred *a_cred, int a_flags, struct buf **a_bpp)
 */
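/*
 * Illustrative caller sketch (simplified, not part of this file; the
 * VOP_BALLOC() calling convention is assumed from the argument structure
 * above and the surrounding write loop is elided).  A write path typically
 * asks balloc for the buffer covering the byte range being written, copies
 * the data in, and queues the buffer:
 *
 *	error = VOP_BALLOC(vp, uio->uio_offset, xfersize, cred, flags, &bp);
 *	if (error == 0) {
 *		error = uiomove(bp->b_data + blkoff(fs, uio->uio_offset),
 *				xfersize, uio);
 *		bdwrite(bp);
 *	}
 */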
int
ffs_balloc(struct vop_balloc_args *ap)
{
	struct inode *ip;
	ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	int flags;
	struct fs *fs;
	ufs_daddr_t nb;
	struct buf *bp, *nbp, *dbp;
	struct vnode *vp;
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	ufs_daddr_t *lbns_remfree, lbns[NIADDR + 1];
	int unwindidx;
	int seqcount;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	lbn = lblkno(fs, ap->a_startoffset);
	size = blkoff(fs, ap->a_startoffset) + ap->a_size;
	if (size > fs->fs_bsize)
		panic("ffs_balloc: blk too big");
	*ap->a_bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	cred = ap->a_cred;
	flags = ap->a_flags;

	/*
	 * The vnode must be locked for us to be able to safely mess
	 * around with the inode.
	 */
	if (vn_islocked(vp) != LK_EXCLUSIVE) {
		panic("ffs_balloc: vnode %p not exclusively locked!", vp);
	}

	/*
	 * If the next write will extend the file into a new block,
	 * and the file currently ends in a fragment, that fragment
	 * has to be extended to a full block first.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/*
		 * The file size prior to this write fits in the direct
		 * blocks and may end in a fragment, and this write
		 * extends the file beyond the block containing the
		 * current end of file.
		 */
		osize = blksize(fs, ip, nb);
		/*
		 * osize gives the disk space allocated in the last block,
		 * either some number of fragments or a full filesystem
		 * block.
		 */
		if (osize < fs->fs_bsize && osize > 0) {
			/*
			 * Only fragments are allocated in the last block.
			 * Since this write extends beyond that block,
			 * reallocate it as a complete block.
			 */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb,
				    dofftofsb(fs, bp->b_bio2.bio_offset),
				    ip->i_db[nb], fs->fs_bsize, osize, bp);
			/* adjust the inode size, we just grew */
			ip->i_size = smalllblktosize(fs, nb + 1);
			ip->i_db[nb] = dofftofsb(fs, bp->b_bio2.bio_offset);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & B_SYNC)
				bwrite(bp);
			else
				bawrite(bp);
			/* bp is already released here */
		}
	}
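	/*
	 * Worked example of the fragment-extension case above (numbers
	 * assume a hypothetical 8192/1024 filesystem, i.e. fs_bsize ==
	 * 8192 and fs_fsize == 1024):
	 *
	 *	ip->i_size = 3000  ->  nb = lblkno(fs, 3000) = 0
	 *	osize = blksize(fs, ip, 0) = fragroundup(fs, 3000) = 3072
	 *
	 * A write to lbn 2 (nb < lbn) forces block 0 to be reallocated
	 * from three 1K fragments up to the full 8192-byte block before
	 * the new block is touched.
	 */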
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
			error = bread(vp, lblktodoff(fs, lbn), fs->fs_bsize, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
			*ap->a_bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider the need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lblktodoff(fs, lbn),
					      osize, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
			} else {
				error = ffs_realloccg(ip, lbn,
				    ffs_blkpref(ip, lbn, (int)lbn,
					&ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    dofftofsb(fs, bp->b_bio2.bio_offset),
					    nb, nsize, osize, bp);
			}
		} else {
			if (ip->i_size < smalllblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lblktodoff(fs, lbn), nsize, 0, 0);
			bp->b_bio2.bio_offset = fsbtodoff(fs, newb);
			if (flags & B_CLRBUF)
				vfs_bio_clrbuf(bp);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bp);
		}
		ip->i_db[lbn] = dofftofsb(fs, bp->b_bio2.bio_offset);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*ap->a_bpp = bp;
		return (0);
	}
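	/*
	 * Worked example of the in-place fragment growth handled above
	 * (again assuming a hypothetical 8192/1024 filesystem): with
	 * ip->i_size == 3000, a write that extends block 0 to 5000
	 * bytes yields
	 *
	 *	osize = fragroundup(fs, 3000) = 3072
	 *	nsize = fragroundup(fs, 5000) = 5120
	 *
	 * so nsize > osize and ffs_realloccg() grows the allocation from
	 * three fragments to five, possibly moving the data to a new
	 * location on disk.
	 */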
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return (error);
#ifdef DIAGNOSTIC
	if (num < 1)
		panic("ffs_balloc: ufs_getlbns returned indirect block");
#endif
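	/*
	 * Example of what ufs_getlbns() produces (numbers assume
	 * NDADDR == 12 and NINDIR(fs) == 2048, i.e. 8K blocks with
	 * 4-byte UFS1 block pointers): for lbn == 12, the first singly
	 * indirect data block, num == 2 and
	 *
	 *	indirs[0].in_off == 0	(selects ip->i_ib[0])
	 *	indirs[1].in_off == 0	(slot 0 within that indirect block)
	 *
	 * For lbn == 12 + 2048 the doubly indirect chain rooted at
	 * ip->i_ib[1] is used instead and num == 3.
	 */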
	/*
	 * Get a handle on the data block buffer before working through
	 * indirect blocks to avoid a deadlock between the VM system holding
	 * a locked VM page and issuing a BMAP (which tries to lock the
	 * indirect blocks), and the filesystem holding a locked indirect
	 * block and then trying to read a data block (which tries to lock
	 * the underlying VM pages).
	 */
	dbp = getblk(vp, lblktodoff(fs, lbn), fs->fs_bsize, 0, 0);

	/*
	 * Setup undo history
	 */
	allocib = NULL;
	allocblk = allociblk;
	lbns_remfree = lbns;

	unwindidx = -1;

	/*
	 * Fetch the first indirect block directly from the inode, allocating
	 * one if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, NULL);
		/*
		 * If the filesystem has run out of space we can skip the
		 * full fsync/undo of the main [fail] case since no undo
		 * history has been built yet.  Hence the goto fail2.
		 */
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb)) != 0)
			goto fail2;
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[1].in_lbn;
		bp = getblk(vp, lblktodoff(fs, indirs[1].in_lbn),
			    fs->fs_bsize, 0, 0);
		bp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		vfs_bio_clrbuf(bp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if (DOINGASYNC(vp))
				bdwrite(bp);
			else if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp, lblktodoff(fs, indirs[i].in_lbn),
			      (int)fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, NULL);
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb)) != 0) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[i].in_lbn;
		nbp = getblk(vp, lblktodoff(fs, indirs[i].in_lbn),
			     fs->fs_bsize, 0, 0);
		nbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}
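
	/*
	 * At this point bp holds the locked last indirect block on the
	 * path to lbn, bap points into its data, and nb is the data
	 * block number found at bap[indirs[i].in_off] (0 if the data
	 * block has not been allocated yet).
	 */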

	/*
	 * Get the data block, allocating if necessary.  We have already
	 * called getblk() on the data block buffer, dbp.  If we have to
	 * allocate it and B_CLRBUF has been set the inference is an intention
	 * to zero out the related disk blocks, so we do not have to issue
	 * a read.  Instead we simply call vfs_bio_clrbuf().  If B_CLRBUF is
	 * not set the caller intends to overwrite the entire contents of the
	 * buffer and we don't waste time trying to clean up the contents.
	 *
	 * bp references the current indirect block.  When allocating,
	 * the block must be updated.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = lbn;
		dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		if (flags & B_CLRBUF)
			vfs_bio_clrbuf(dbp);
		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, dbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		*ap->a_bpp = dbp;
		return (0);
	}
	brelse(bp);

	/*
	 * At this point all related indirect blocks have been allocated
	 * if necessary and released.  bp is no longer valid.  dbp holds
	 * our getblk()'d data block.
	 *
	 * XXX we previously performed a cluster_read operation here.
	 */
	if (flags & B_CLRBUF) {
		/*
		 * If B_CLRBUF is set we must validate the invalid portions
		 * of the buffer.  This typically requires a read-before-
		 * write.  The strategy call will fill in bio_offset in that
		 * case.
		 *
		 * If we hit this case we do a cluster read if possible
		 * since nearby data blocks are likely to be accessed soon
		 * too.
		 */
		if ((dbp->b_flags & B_CACHE) == 0) {
			bqrelse(dbp);
			seqcount = (flags & B_SEQMASK) >> B_SEQSHIFT;
			if (seqcount &&
			    (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
				error = cluster_read(vp, (off_t)ip->i_size,
					    lblktodoff(fs, lbn),
					    (int)fs->fs_bsize,
					    MAXBSIZE, seqcount, &dbp);
			} else {
				error = bread(vp, lblktodoff(fs, lbn),
					      (int)fs->fs_bsize, &dbp);
			}
			if (error)
				goto fail;
		} else {
			dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
		}
	} else {
		/*
		 * If B_CLRBUF is not set the caller intends to overwrite
		 * the entire contents of the buffer.  We can simply set
		 * bio_offset and we are done.
		 */
		dbp->b_bio2.bio_offset = fsbtodoff(fs, nb);
	}
	*ap->a_bpp = dbp;
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 * We have to fsync the file before we start to get rid of all
	 * of its dependencies so that we do not leave them dangling.
	 * We have to sync it at the end so that the soft updates code
	 * does not find any untracked changes.  Although this is really
	 * slow, running out of disk space is not expected to be a common
	 * occurrence.  The error return from fsync is ignored as we already
	 * have an error to return to the user.
	 */
	VOP_FSYNC(vp, MNT_WAIT, 0);
	for (deallocated = 0, blkp = allociblk, lbns_remfree = lbns;
	     blkp < allocblk; blkp++, lbns_remfree++) {
		/*
		 * We shall not leave the freed blocks on the vnode
		 * buffer object lists.
		 */
		bp = getblk(vp, lblktodoff(fs, *lbns_remfree),
			    fs->fs_bsize, 0, 0);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
		deallocated += fs->fs_bsize;
	}

	if (allocib != NULL) {
		*allocib = 0;
	} else if (unwindidx >= 0) {
		int r;

		r = bread(vp, lblktodoff(fs, indirs[unwindidx].in_lbn),
			  (int)fs->fs_bsize, &bp);
		if (r) {
			panic("Could not unwind indirect block, error %d", r);
			brelse(bp);
		} else {
			bap = (ufs_daddr_t *)bp->b_data;
			bap[indirs[unwindidx].in_off] = 0;
			if (flags & B_SYNC) {
				bwrite(bp);
			} else {
				if (bp->b_bufsize == fs->fs_bsize)
					bp->b_flags |= B_CLUSTEROK;
				bdwrite(bp);
			}
		}
	}
	if (deallocated) {
#ifdef QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) ufs_chkdq(ip, (long)-btodb(deallocated), cred, FORCE);
#endif
		ip->i_blocks -= btodb(deallocated);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	VOP_FSYNC(vp, MNT_WAIT, 0);

	/*
	 * After the buffers are invalidated and on-disk pointers are
	 * cleared, free the blocks.
	 */
	for (blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
	}

	/*
	 * Cleanup the data block we getblk()'d before returning.
	 */
fail2:
	brelse(dbp);
	return (error);
}