/*	$NetBSD: ffs_balloc.c,v 1.13 2004/06/20 22:20:18 jmc Exp $	*/
/* From NetBSD: ffs_balloc.c,v 1.25 2001/08/08 08:36:36 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 * $FreeBSD: head/usr.sbin/makefs/ffs/ffs_balloc.c 336736 2018-07-26 13:33:10Z emaste $
 */

#include <sys/param.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "makefs.h"

#include <vfs/ufs/dinode.h>
#include <vfs/ufs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int ffs_balloc_ufs1(struct inode *, off_t, int, struct m_buf **);
#ifndef __DragonFly__ /* XXX UFS2 */
static int ffs_balloc_ufs2(struct inode *, off_t, int, struct m_buf **);
#endif
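
/*
 * UFS2 support is compiled out on DragonFly builds (note the
 * "XXX UFS2" markers below); only the UFS1 path is used there.
 */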

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * Assume: flags == B_SYNC | B_CLRBUF
 */

int
ffs_balloc(struct inode *ip, off_t offset, int bufsize, struct m_buf **bpp)
{
#ifndef __DragonFly__ /* XXX UFS2 */
	if (ip->i_fs->fs_magic == FS_UFS2_MAGIC)
		return ffs_balloc_ufs2(ip, offset, bufsize, bpp);
	else
#endif
		return ffs_balloc_ufs1(ip, offset, bufsize, bpp);
}

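/*
 * UFS1 version: on-disk block pointers are 32 bits wide, so direct and
 * indirect block numbers are carried as int32_t throughout.
 */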
static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize, struct m_buf **bpp)
{
	makefs_daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	makefs_daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	int32_t *allocblk, allociblk[UFS_NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

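	/*
	 * lbn is the logical block that contains 'offset'; size is how
	 * many bytes of that block must be valid after this call (the
	 * offset within the block plus the caller's bufsize).
	 */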
	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(ip->i_devvp, lbn, fs->fs_bsize,
				    NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread(ip->i_devvp, lbn, osize,
					    NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs1_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
				nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(ip->i_devvp, lbn, nsize, 0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}
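
	/*
	 * ufs_getlbns() has filled indirs[] with the chain leading to lbn:
	 * indirs[0].in_off indexes the inode's indirect pointer array, and
	 * each later entry names an indirect block (by its logical block
	 * number) together with the offset of the next-level pointer
	 * within that block.
	 */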

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */

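	/*
	 * After this decrement, indirs[num] is the entry whose block holds
	 * the pointer to the data block itself.
	 */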
	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
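
	/*
	 * allociblk[] records every block allocated below.  The in-kernel
	 * balloc uses this list to back the allocations out on failure;
	 * nothing in this userland copy consumes it, and errors are simply
	 * returned to the caller.
	 */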
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(ip->i_devvp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
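		/*
		 * Walk down the chain: read the indirect block at each
		 * level, pick up the pointer for the next level, and
		 * allocate (and zero) a missing indirect block on the way.
		 * The loop exits at level 'num', whose entry points at the
		 * data block itself.
		 */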
		error = bread(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize,
		    NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
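
		/*
		 * Hook the freshly written indirect block into its parent
		 * and push the parent out as well.
		 */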
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(ip->i_devvp, lbn, fs->fs_bsize, 0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
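
	/*
	 * The data block already existed; release the indirect block and,
	 * if the caller asked for a buffer, read the data block in.
	 */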
	brelse(bp);
	if (bpp != NULL) {
		error = bread(ip->i_devvp, lbn, (int)fs->fs_bsize, NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}

#ifndef __DragonFly__ /* XXX UFS2 */
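/*
 * UFS2 version: same logic as ffs_balloc_ufs1() above, but with 64-bit
 * on-disk block pointers (int64_t) and the UFS2 inode fields.
 */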
static int
ffs_balloc_ufs2(struct inode *ip, off_t offset, int bufsize, struct m_buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	struct m_buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[UFS_NIADDR + 2];
	daddr_t newb, pref, nb;
	int64_t *bap;
	int osize, nsize, num, i, error;
	int64_t *allocblk, allociblk[UFS_NIADDR + 1];
	int64_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}

	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */

	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < UFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}

	/*
	 * The first UFS_NDADDR blocks are direct blocks
	 */

	if (lbn < UFS_NDADDR) {
		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs2_size >=
		    (uint64_t)lblktosize(fs, lbn + 1)) {

			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */

			if (bpp != NULL) {
				error = bread(ip->i_devvp, lbn, fs->fs_bsize,
				    NULL, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {

			/*
			 * Consider need to reallocate a fragment.
			 */

			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {

				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */

				if (bpp != NULL) {
					error = bread(ip->i_devvp, lbn, osize,
					    NULL, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {

			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */

			if (ip->i_ffs2_size < (uint64_t)lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
				&ip->i_ffs2_db[0]),
				nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(ip->i_devvp, lbn, nsize, 0, 0, 0);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
		return (0);
	}

	/*
	 * Determine the number of levels of indirection.
	 */

	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);

	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */

	--num;
	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(ip->i_devvp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = ufs_rw64(nb, needswap);
	}

	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */

	for (i = 1;;) {
		error = bread(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize,
		    NULL, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int64_t *)bp->b_data;
		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(ip->i_devvp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */

		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);

		bwrite(bp);
	}

	/*
	 * Get the data block, allocating if necessary.
	 */

	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(ip->i_devvp, lbn, fs->fs_bsize, 0, 0, 0);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);

		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread(ip->i_devvp, lbn, (int)fs->fs_bsize, NULL, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}
#endif
