/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

static ufs_lbn_t lbn_count(struct ufsmount *, int);
static int readindir(struct vnode *, ufs_lbn_t, ufs2_daddr_t, struct buf **);

/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk. The conversion is done by using the logical block
 * number to index into the array of block pointers described by the dinode.
 */
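/*
 * Illustrative sketch (not authoritative; the real work happens in
 * ufs_bmaparray() below): for a logical block that lies within the
 * direct pointers, the translation amounts to
 *
 *	*ap->a_bnp = blkptrtodb(ump, DIP(ip, i_db[ap->a_bn]));
 *
 * Blocks beyond UFS_NDADDR are resolved by walking the chain of
 * indirect blocks described in the comment above ufs_bmaparray().
 */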
int
ufs_bmap(
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap)
{
	ufs2_daddr_t blkno;
	int error;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_bop != NULL)
		*ap->a_bop = &VFSTOUFS(ap->a_vp->v_mount)->um_devvp->v_bufobj;
	if (ap->a_bnp == NULL)
		return (0);

	error = ufs_bmaparray(ap->a_vp, ap->a_bn, &blkno, NULL,
	    ap->a_runp, ap->a_runb);
	*ap->a_bnp = blkno;
	return (error);
}

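/*
 * Read the indirect block with logical block number lbn and disk address
 * daddr through the buffer cache.  If the block is already cached it is
 * returned as-is; otherwise it is read from the device and the I/O is
 * charged to the current thread.  On success the locked buffer is
 * returned in *bpp.
 */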
static int
readindir(struct vnode *vp,
	ufs_lbn_t lbn,
	ufs2_daddr_t daddr,
	struct buf **bpp)
{
	struct buf *bp;
	struct mount *mp;
	struct ufsmount *ump;
	int error;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);

	bp = getblk(vp, lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		KASSERT(daddr != 0,
		    ("readindir: indirect block not in cache"));

		bp->b_blkno = blkptrtodb(ump, daddr);
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			racct_add_buf(curproc, bp, 0);
			PROC_UNLOCK(curproc);
		}
#endif
		curthread->td_ru.ru_inblock++;
		error = bufwait(bp);
		if (error != 0) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}

/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ufs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
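/*
 * Worked example (illustrative only, assuming the common UFS2 layout with
 * 32K blocks, i.e. UFS_NDADDR == 12 and MNINDIR(ump) == 32768 / 8 == 4096):
 *
 *	single indirect block:	lbn -12			(first data block 12)
 *	double indirect block:	lbn -(12 + 4096) - 1	(== -4109)
 *	its first child:	lbn -(12 + 4096)	(== -4108)
 *	triple indirect block:	lbn -(12 + 4096 + 4096 * 4096) - 2
 *
 * ufs_getlbns() below computes exactly these lbns.
 */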

int
ufs_bmaparray(struct vnode *vp,
	ufs2_daddr_t bn,
	ufs2_daddr_t *bnp,
	struct buf *nbp,
	int *runp,
	int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct indir a[UFS_NIADDR+1], *ap;
	ufs2_daddr_t daddr;
	ufs_lbn_t metalbn;
	int error, num, maxrun = 0;
	int *nump;

	ap = NULL;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);

	if (runp) {
		maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	ap = a;
	nump = &num;
	error = ufs_getlbns(vp, bn, ap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		if (bn >= 0 && bn < UFS_NDADDR) {
			*bnp = blkptrtodb(ump, DIP(ip, i_db[bn]));
		} else if (bn < 0 && bn >= -UFS_NXADDR) {
			*bnp = blkptrtodb(ump, ip->i_din2->di_extb[-1 - bn]);
			if (*bnp == 0)
				*bnp = -1;
			if (nbp == NULL) {
				/* indirect block not found */
				return (EINVAL);
			}
			nbp->b_xflags |= BX_ALTDATA;
			return (0);
		} else {
			/* blkno out of range */
			return (EINVAL);
		}
		/*
		 * Since this is FFS independent code, we are out of
		 * scope for the definitions of BLK_NOCOPY and
		 * BLK_SNAP, but we do know that they will fall in
		 * the range 1..um_seqinc, so we use that test and
		 * return a request for a zeroed out buffer if attempts
		 * are made to read a BLK_NOCOPY or BLK_SNAP block.
		 */
		if (IS_SNAPSHOT(ip) && DIP(ip, i_db[bn]) > 0 &&
		    DIP(ip, i_db[bn]) < ump->um_seqinc) {
			*bnp = -1;
		} else if (*bnp == 0) {
			*bnp = IS_SNAPSHOT(ip) ? blkptrtodb(ump,
			    bn * ump->um_seqinc) : -1;
		} else if (runp) {
			ufs2_daddr_t bnb = bn;
			for (++bn; bn < UFS_NDADDR && *runp < maxrun &&
			    is_sequential(ump, DIP(ip, i_db[bn - 1]),
			    DIP(ip, i_db[bn]));
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, DIP(ip, i_db[bn]),
						DIP(ip, i_db[bn+1]));
						--bn, ++*runb);
			}
		}
		return (0);
	}

	/* Get disk address out of indirect block array */
	daddr = DIP(ip, i_ib[ap->in_off]);

	for (bp = NULL, ++ap; --num; ++ap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */

		metalbn = ap->in_lbn;
		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);
		error = readindir(vp, metalbn, daddr, &bp);
		if (error != 0)
			return (error);

		if (I_IS_UFS1(ip))
			daddr = ((ufs1_daddr_t *)bp->b_data)[ap->in_off];
		else
			daddr = ((ufs2_daddr_t *)bp->b_data)[ap->in_off];
		if ((error = UFS_CHECK_BLKNO(mp, ip->i_number, daddr,
		     mp->mnt_stat.f_iosize)) != 0) {
			bqrelse(bp);
			return (error);
		}
		if (I_IS_UFS1(ip)) {
			if (num == 1 && daddr && runp) {
				for (bn = ap->in_off + 1;
				    bn < MNINDIR(ump) && *runp < maxrun &&
				    is_sequential(ump,
				    ((ufs1_daddr_t *)bp->b_data)[bn - 1],
				    ((ufs1_daddr_t *)bp->b_data)[bn]);
				    ++bn, ++*runp);
				bn = ap->in_off;
				if (runb && bn) {
					for (--bn; bn >= 0 && *runb < maxrun &&
					    is_sequential(ump,
					    ((ufs1_daddr_t *)bp->b_data)[bn],
					    ((ufs1_daddr_t *)bp->b_data)[bn+1]);
					    --bn, ++*runb);
				}
			}
			continue;
		}
		if (num == 1 && daddr && runp) {
			for (bn = ap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((ufs2_daddr_t *)bp->b_data)[bn - 1],
			    ((ufs2_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = ap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
				    is_sequential(ump,
				    ((ufs2_daddr_t *)bp->b_data)[bn],
				    ((ufs2_daddr_t *)bp->b_data)[bn + 1]);
				    --bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if (IS_SNAPSHOT(ip) && daddr > 0 && daddr < ump->um_seqinc) {
		*bnp = -1;
		return (0);
	}
	*bnp = blkptrtodb(ump, daddr);
	if (*bnp == 0) {
		if (IS_SNAPSHOT(ip))
			*bnp = blkptrtodb(ump, bn * ump->um_seqinc);
		else
			*bnp = -1;
	}
	return (0);
}

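/*
 * Return the number of data blocks reachable through an indirect block
 * chain "level" levels deep, i.e. MNINDIR(ump) raised to that power
 * (1 for level 0).  With MNINDIR(ump) == 4096, for example,
 * lbn_count(ump, 2) == 16777216.
 */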
static ufs_lbn_t
lbn_count(struct ufsmount *ump, int level)
{
	ufs_lbn_t blockcnt;

	for (blockcnt = 1; level > 0; level--)
		blockcnt *= MNINDIR(ump);
	return (blockcnt);
}

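/*
 * Advance *offp to the start of the next allocated data block at or after
 * the current offset, skipping over holes by scanning the direct pointers
 * and descending through indirect blocks.  Returns EINVAL for non-regular
 * files and snapshots, and ENXIO if the offset is past EOF or no allocated
 * block exists at or beyond it (the semantics needed for SEEK_DATA-style
 * lookups).
 */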
int
ufs_bmap_seekdata(struct vnode *vp, off_t *offp)
{
	struct buf *bp;
	struct indir a[UFS_NIADDR + 1], *ap;
	struct inode *ip;
	struct mount *mp;
	struct ufsmount *ump;
	vm_object_t obj;
	ufs2_daddr_t bn, daddr, nextbn;
	uint64_t bsize;
	off_t numblks;
	int error, num, num1, off;

	bp = NULL;
	error = 0;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);

	if (vp->v_type != VREG || IS_SNAPSHOT(ip))
		return (EINVAL);
	if (*offp < 0 || *offp >= ip->i_size)
		return (ENXIO);

	/*
	 * We could have pages on the vnode's object queue which still
	 * do not have the data blocks allocated.  Convert all dirty
	 * pages into buffer writes to ensure that we see all
	 * allocated data.
	 */
	obj = vp->v_object;
	if (obj != NULL) {
		VM_OBJECT_WLOCK(obj);
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);
	}

	bsize = mp->mnt_stat.f_iosize;
	for (bn = *offp / bsize, numblks = howmany(ip->i_size, bsize);
	    bn < numblks; bn = nextbn) {
		if (bn < UFS_NDADDR) {
			daddr = DIP(ip, i_db[bn]);
			if (daddr != 0)
				break;
			nextbn = bn + 1;
			continue;
		}

		ap = a;
		error = ufs_getlbns(vp, bn, ap, &num);
		if (error != 0)
			break;
		MPASS(num >= 2);
		daddr = DIP(ip, i_ib[ap->in_off]);
		ap++, num--;
		for (nextbn = UFS_NDADDR, num1 = num - 1; num1 > 0; num1--)
			nextbn += lbn_count(ump, num1);
		if (daddr == 0) {
			nextbn += lbn_count(ump, num);
			continue;
		}

		for (; daddr != 0 && num > 0; ap++, num--) {
			if (bp != NULL)
				bqrelse(bp);
			error = readindir(vp, ap->in_lbn, daddr, &bp);
			if (error != 0)
				return (error);

			/*
			 * Scan the indirect block until we find a non-zero
			 * pointer.
			 */
			off = ap->in_off;
			do {
				daddr = I_IS_UFS1(ip) ?
				    ((ufs1_daddr_t *)bp->b_data)[off] :
				    ((ufs2_daddr_t *)bp->b_data)[off];
			} while (daddr == 0 && ++off < MNINDIR(ump));
			nextbn += off * lbn_count(ump, num - 1);

			/*
			 * We need to recompute the LBNs of indirect
			 * blocks, so restart with the updated block offset.
			 */
			if (off != ap->in_off)
				break;
		}
		if (num == 0) {
			/*
			 * We found a data block.
			 */
			bn = nextbn;
			break;
		}
	}
	if (bp != NULL)
		bqrelse(bp);
	if (bn >= numblks)
		error = ENXIO;
	if (error == 0 && *offp < bn * bsize)
		*offp = bn * bsize;
	return (error);
}

/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ib and
 * once with the offset into the page itself.
 */
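/*
 * Worked example (illustrative only, again assuming UFS_NDADDR == 12 and
 * MNINDIR(ump) == 4096): for data block bn == 12 + 4096 + 5 == 4113, which
 * is reached through the double indirect block, the routine fills in three
 * entries and sets *nump to 3:
 *
 *	in_lbn -4109, in_off 1	(i_ib[1], the double indirect pointer)
 *	in_lbn -4109, in_off 0	(slot 0 within the double indirect block)
 *	in_lbn -4108, in_off 5	(slot 5 within that single indirect block)
 */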
int
ufs_getlbns(struct vnode *vp,
	ufs2_daddr_t bn,
	struct indir *ap,
	int *nump)
{
	ufs2_daddr_t blockcnt;
	ufs_lbn_t metalbn, realbn;
	struct ufsmount *ump;
	int i, numlevels, off;

	ump = VFSTOUFS(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if (bn < 0)
		bn = -bn;

	/* The first UFS_NDADDR blocks are direct blocks. */
	if (bn < UFS_NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and UFS_NIADDR - i is the
	 * number of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = UFS_NIADDR, bn -= UFS_NDADDR; ;
	    i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		blockcnt *= MNINDIR(ump);
		if (bn < blockcnt)
			break;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + UFS_NIADDR - i);
	else
		metalbn = -(-realbn - bn + UFS_NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = UFS_NIADDR - i;
	ap++;
	for (++numlevels; i <= UFS_NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		blockcnt /= MNINDIR(ump);
		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		++ap;

		metalbn -= -1 + off * blockcnt;
	}
	if (nump)
		*nump = numlevels;
	return (0);
}
529