/*	$NetBSD: ffs_balloc.c,v 1.13 2004/06/20 22:20:18 jmc Exp $	*/
/* From NetBSD: ffs_balloc.c,v 1.25 2001/08/08 08:36:36 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int ffs_balloc_ufs1(struct inode *, off_t, int, struct m_buf **);
static int ffs_balloc_ufs2(struct inode *, off_t, int, struct m_buf **);

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * Assume: flags == B_SYNC | B_CLRBUF
 */

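/*
 * A typical caller (illustrative sketch only, not the actual makefs call
 * site) asks for the buffer covering a file offset, copies its data into
 * place and writes the buffer out:
 *
 *	error = ffs_balloc(ip, offset, chunk, &bp);
 *	if (error == 0) {
 *		memcpy((char *)bp->b_data + blkoff(fs, offset), src, chunk);
 *		error = bwrite(bp);
 *	}
 */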
int
ffs_balloc(struct inode *ip, off_t offset, int bufsize, struct m_buf **bpp)
{
	if (ip->i_fs->fs_magic == FS_UFS2_MAGIC)
		return ffs_balloc_ufs2(ip, offset, bufsize, bpp);
	else
		return ffs_balloc_ufs1(ip, offset, bufsize, bpp);
}

static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize,
    struct m_buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	int32_t *allocblk, allociblk[UFS_NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

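	/*
	 * lbn is the logical block of the file that "offset" falls in, and
	 * "size" is how much of that block must exist once this request
	 * has been satisfied.  needswap records whether the image being
	 * built has the opposite byte order from the host, so every
	 * on-disk block pointer below goes through ufs_rw32().
	 */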
	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */

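	/*
	 * Growing an existing trailing fragment would need ffs_realloccg(),
	 * which this userland port does not implement, so the checks below
	 * (and the matching direct-block case) abort instead.  makefs is
	 * expected to write file data in ascending order, so in practice
	 * these paths should never be reached.
	 */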
	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

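	/*
	 * The UFS_NDADDR (12) direct pointers live in the inode itself
	 * (i_ffs1_db[]), so no indirect block is involved here.  Only the
	 * last block of a file may be a fragment smaller than fs_bsize.
	 */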
	if (lbn < UFS_NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread((void *)ip->i_devvp, lbn,
				    fs->fs_bsize, NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread((void *)ip->i_devvp, lbn,
					    osize, NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * The block was not previously allocated;
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
				nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk((void *)ip->i_devvp, lbn, nsize,
				    0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

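	/*
	 * ufs_getlbns() computes the chain of indirect blocks leading to
	 * lbn: indirs[0].in_off selects one of the inode's UFS_NIADDR
	 * indirect pointers, and each following entry gives the logical
	 * block number of an indirect block plus the slot to follow in it.
	 */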
	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */

	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk((void *)ip->i_devvp, indirs[1].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

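	/*
	 * Walk down the chain.  Each pass reads one indirect block; if the
	 * slot for the next level is empty, a new block is allocated,
	 * zeroed and written to disk before the parent slot is filled in
	 * and rewritten, so on-disk indirect blocks never point at garbage.
	 */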
	for (i = 1;;) {
		error = bread((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

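	/*
	 * bap still points into the last indirect block read above.  If the
	 * data block does not exist yet, allocate it (ffs_blkpref_ufs1()
	 * uses the neighbouring entries in bap to pick a nearby block),
	 * record the new pointer and write the indirect block back out.
	 */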
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk((void *)ip->i_devvp, lbn, fs->fs_bsize,
			    0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

		/*
		 * If required, write synchronously; otherwise use a
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread((void *)ip->i_devvp, lbn, (int)fs->fs_bsize,
		    NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}

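/*
 * UFS2 flavour of the code above.  The logic is identical to
 * ffs_balloc_ufs1(); only the widths differ: block pointers are 64 bits,
 * so the ufs2 dinode fields, ffs_blkpref_ufs2() and ufs_rw64() are used.
 */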
static int
ffs_balloc_ufs2(struct inode *ip, off_t offset, int bufsize,
    struct m_buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref, nb;
	int64_t *bap;
	int osize, nsize, num, i, error;
	int64_t *allocblk, allociblk[UFS_NIADDR + 1];
	int64_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs2_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread((void *)ip->i_devvp, lbn,
				    fs->fs_bsize, NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread((void *)ip->i_devvp, lbn,
					    osize, NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * The block was not previously allocated;
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs2_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
				&ip->i_ffs2_db[0]),
				nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk((void *)ip->i_devvp, lbn, nsize,
				    0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */

	--num;
	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk((void *)ip->i_devvp, indirs[1].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = ufs_rw64(nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int64_t *)bp->b_data;
		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk((void *)ip->i_devvp, indirs[i].in_lbn,
		    fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk((void *)ip->i_devvp, lbn, fs->fs_bsize,
			    0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);

		/*
		 * If required, write synchronously; otherwise use a
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread((void *)ip->i_devvp, lbn, (int)fs->fs_bsize,
		    NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}