xref: /freebsd/sbin/dump/traverse.c (revision 16038816)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1980, 1988, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifndef lint
33 #if 0
34 static char sccsid[] = "@(#)traverse.c	8.7 (Berkeley) 6/15/95";
35 #endif
36 static const char rcsid[] =
37   "$FreeBSD$";
38 #endif /* not lint */
39 
40 #include <sys/param.h>
41 #include <sys/stat.h>
42 
43 #include <ufs/ufs/dir.h>
44 #include <ufs/ufs/dinode.h>
45 #include <ufs/ffs/fs.h>
46 
47 #include <protocols/dumprestore.h>
48 
49 #include <assert.h>
50 #include <ctype.h>
51 #include <errno.h>
52 #include <inttypes.h>
53 #include <limits.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <string.h>
57 #include <timeconv.h>
58 #include <unistd.h>
59 
60 #include "dump.h"
61 
62 union dinode {
63 	struct ufs1_dinode dp1;
64 	struct ufs2_dinode dp2;
65 };
66 #define	DIP(dp, field) \
67 	((sblock->fs_magic == FS_UFS1_MAGIC) ? \
68 	(dp)->dp1.field : (dp)->dp2.field)
69 #define DIP_SET(dp, field, val) do {\
70 	if (sblock->fs_magic == FS_UFS1_MAGIC) \
71 		(dp)->dp1.field = (val); \
72 	else \
73 		(dp)->dp2.field = (val); \
74 	} while (0)
75 
76 #define	HASDUMPEDFILE	0x1
77 #define	HASSUBDIRS	0x2
78 
79 static	int dirindir(ino_t ino, ufs2_daddr_t blkno, int level, long *size,
80     long *tapesize, int nodump, ino_t maxino);
81 static	void dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int level,
82     off_t *size);
83 static	void ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino);
84 static	void ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags,
85     ino_t ino, int last);
86 static	int appendextdata(union dinode *dp);
87 static	void writeextdata(union dinode *dp, ino_t ino, int added);
88 static	int searchdir(ino_t ino, ufs2_daddr_t blkno, long size, long filesize,
89     long *tapesize, int nodump, ino_t maxino);
90 static	long blockest(union dinode *dp);
91 
92 /*
93  * This is an estimation of the number of TP_BSIZE blocks in the file.
94  * It estimates the number of blocks in files with holes by assuming
95  * that all of the blocks accounted for by di_blocks are data blocks
96  * (when some of the blocks are usually used for indirect pointers);
97  * hence the estimate may be high.
98  */
99 static long
100 blockest(union dinode *dp)
101 {
102 	long blkest, sizeest;
103 
104 	/*
105 	 * dp->di_size is the size of the file in bytes.
106 	 * dp->di_blocks stores the number of sectors actually in the file.
107 	 * If there are more sectors than the size would indicate, this just
108 	 *	means that there are indirect blocks in the file or unused
109 	 *	sectors in the last file block; we can safely ignore these
110 	 *	(blkest = sizeest below).
111 	 * If the file is bigger than the number of sectors would indicate,
112 	 *	then the file has holes in it.	In this case we must use the
113 	 *	block count to estimate the number of data blocks used, but
114 	 *	we use the actual size for estimating the number of indirect
115 	 *	dump blocks (sizeest vs. blkest in the indirect block
116 	 *	calculation).
117 	 */
118 	if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0)
119 		return (1);
120 	blkest = howmany(dbtob(DIP(dp, di_blocks)), TP_BSIZE);
121 	sizeest = howmany(DIP(dp, di_size), TP_BSIZE);
122 	if (blkest > sizeest)
123 		blkest = sizeest;
124 	if (DIP(dp, di_size) > sblock->fs_bsize * UFS_NDADDR) {
125 		/* calculate the number of indirect blocks on the dump tape */
126 		blkest += howmany(sizeest -
127 		    UFS_NDADDR * sblock->fs_bsize / TP_BSIZE, TP_NINDIR);
128 	}
129 	return (blkest + 1);
130 }
131 
132 /* Auxiliary macro to pick up files changed since previous dump. */
133 #define	CHANGEDSINCE(dp, t) \
134 	(DIP(dp, di_mtime) >= (t) || DIP(dp, di_ctime) >= (t))
135 
136 /* The WANTTODUMP macro decides whether a file should be dumped. */
137 #ifdef UF_NODUMP
138 #define	WANTTODUMP(dp) \
139 	(CHANGEDSINCE(dp, spcl.c_ddate) && \
140 	 (nonodump || (DIP(dp, di_flags) & UF_NODUMP) != UF_NODUMP))
141 #else
142 #define	WANTTODUMP(dp) CHANGEDSINCE(dp, spcl.c_ddate)
143 #endif
144 
145 /*
146  * Dump pass 1.
147  *
148  * Walk the inode list for a file system to find all allocated inodes
149  * that have been modified since the previous dump time. Also, find all
150  * the directories in the file system.
151  */
152 int
153 mapfiles(ino_t maxino, long *tapesize)
154 {
155 	int i, cg, mode, inosused;
156 	int anydirskipped = 0;
157 	union dinode *dp;
158 	struct cg *cgp;
159 	ino_t ino;
160 	u_char *cp;
161 
162 	if ((cgp = malloc(sblock->fs_cgsize)) == NULL)
163 		quit("mapfiles: cannot allocate memory.\n");
164 	for (cg = 0; cg < sblock->fs_ncg; cg++) {
165 		ino = cg * sblock->fs_ipg;
166 		blkread(fsbtodb(sblock, cgtod(sblock, cg)), (char *)cgp,
167 		    sblock->fs_cgsize);
168 		if (sblock->fs_magic == FS_UFS2_MAGIC)
169 			inosused = cgp->cg_initediblk;
170 		else
171 			inosused = sblock->fs_ipg;
172 		/*
173 		 * If we are using soft updates, then we can trust the
174 		 * cylinder group inode allocation maps to tell us which
175 		 * inodes are allocated. We will scan the used inode map
176 		 * to find the inodes that are really in use, and then
177 		 * read only those inodes in from disk.
178 		 */
179 		if (sblock->fs_flags & FS_DOSOFTDEP) {
180 			if (!cg_chkmagic(cgp))
181 				quit("mapfiles: cg %d: bad magic number\n", cg);
182 			cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
183 			for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
184 				if (*cp == 0)
185 					continue;
186 				for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
187 					if (*cp & i)
188 						break;
189 					inosused--;
190 				}
191 				break;
192 			}
193 			if (inosused <= 0)
194 				continue;
195 		}
196 		for (i = 0; i < inosused; i++, ino++) {
197 			if (ino < UFS_ROOTINO ||
198 			    (dp = getino(ino, &mode)) == NULL ||
199 			    (mode & IFMT) == 0)
200 				continue;
201 			if (ino >= maxino) {
202 				msg("Skipping inode %ju >= maxino %ju\n",
203 				    (uintmax_t)ino, (uintmax_t)maxino);
204 				continue;
205 			}
206 			/*
207 			 * Everything must go in usedinomap so that a check
208 			 * for "in dumpdirmap but not in usedinomap" to detect
209 			 * dirs with nodump set has a chance of succeeding
210 			 * (this is used in mapdirs()).
211 			 */
212 			SETINO(ino, usedinomap);
213 			if (mode == IFDIR)
214 				SETINO(ino, dumpdirmap);
215 			if (WANTTODUMP(dp)) {
216 				SETINO(ino, dumpinomap);
217 				if (mode != IFREG &&
218 				    mode != IFDIR &&
219 				    mode != IFLNK)
220 					*tapesize += 1;
221 				else
222 					*tapesize += blockest(dp);
223 				continue;
224 			}
225 			if (mode == IFDIR) {
226 				if (!nonodump &&
227 				    (DIP(dp, di_flags) & UF_NODUMP))
228 					CLRINO(ino, usedinomap);
229 				anydirskipped = 1;
230 			}
231 		}
232 	}
233 	/*
234 	 * Restore gets very upset if the root is not dumped,
235 	 * so ensure that it always is dumped.
236 	 */
237 	SETINO(UFS_ROOTINO, dumpinomap);
238 	return (anydirskipped);
239 }
240 
241 /*
242  * Dump pass 2.
243  *
244  * Scan each directory on the file system to see if it has any modified
245  * files in it. If it does, and has not already been added to the dump
246  * list (because it was itself modified), then add it. If a directory
247  * has not been modified itself, contains no modified files and has no
248  * subdirectories, then it can be deleted from the dump list and from
249  * the list of directories. By deleting it from the list of directories,
250  * its parent may now qualify for the same treatment on this or a later
251  * pass using this algorithm.
252  */
253 int
254 mapdirs(ino_t maxino, long *tapesize)
255 {
256 	union dinode *dp;
257 	int i, isdir, nodump;
258 	char *map;
259 	ino_t ino;
260 	union dinode di;
261 	long filesize;
262 	int ret, change = 0;
263 
264 	isdir = 0;		/* XXX just to get gcc to shut up */
265 	for (map = dumpdirmap, ino = 1; ino < maxino; ino++) {
266 		if (((ino - 1) % CHAR_BIT) == 0)	/* map is offset by 1 */
267 			isdir = *map++;
268 		else
269 			isdir >>= 1;
270 		/*
271 		 * If a directory has been removed from usedinomap, it
272 		 * either has the nodump flag set, or has inherited
273 		 * it.  Although a directory can't be in dumpinomap if
274 		 * it isn't in usedinomap, we have to go through it to
275 		 * propagate the nodump flag.
276 		 */
277 		nodump = !nonodump && (TSTINO(ino, usedinomap) == 0);
278 		if ((isdir & 1) == 0 || (TSTINO(ino, dumpinomap) && !nodump))
279 			continue;
280 		dp = getino(ino, &i);
281 		/*
282 		 * inode buf may change in searchdir().
283 		 */
284 		if (sblock->fs_magic == FS_UFS1_MAGIC)
285 			di.dp1 = dp->dp1;
286 		else
287 			di.dp2 = dp->dp2;
288 		filesize = DIP(&di, di_size);
289 		for (ret = 0, i = 0; filesize > 0 && i < UFS_NDADDR; i++) {
290 			if (DIP(&di, di_db[i]) != 0)
291 				ret |= searchdir(ino, DIP(&di, di_db[i]),
292 				    (long)sblksize(sblock, DIP(&di, di_size),
293 				    i), filesize, tapesize, nodump, maxino);
294 			if (ret & HASDUMPEDFILE)
295 				filesize = 0;
296 			else
297 				filesize -= sblock->fs_bsize;
298 		}
299 		for (i = 0; filesize > 0 && i < UFS_NIADDR; i++) {
300 			if (DIP(&di, di_ib[i]) == 0)
301 				continue;
302 			ret |= dirindir(ino, DIP(&di, di_ib[i]), i, &filesize,
303 			    tapesize, nodump, maxino);
304 		}
305 		if (ret & HASDUMPEDFILE) {
306 			SETINO(ino, dumpinomap);
307 			*tapesize += blockest(&di);
308 			change = 1;
309 			continue;
310 		}
311 		if (nodump) {
312 			if (ret & HASSUBDIRS)
313 				change = 1;	/* subdirs inherit nodump */
314 			CLRINO(ino, dumpdirmap);
315 		} else if ((ret & HASSUBDIRS) == 0)
316 			if (!TSTINO(ino, dumpinomap)) {
317 				CLRINO(ino, dumpdirmap);
318 				change = 1;
319 			}
320 	}
321 	return (change);
322 }
323 
324 /*
325  * Read indirect blocks, and pass the data blocks to be searched
326  * as directories. Quit as soon as any entry is found that will
327  * require the directory to be dumped.
328  */
329 static int
330 dirindir(
331 	ino_t ino,
332 	ufs2_daddr_t blkno,
333 	int ind_level,
334 	long *filesize,
335 	long *tapesize,
336 	int nodump,
337 	ino_t maxino)
338 {
339 	union {
340 		ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)];
341 		ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)];
342 	} idblk;
343 	int ret = 0;
344 	int i;
345 
346 	blkread(fsbtodb(sblock, blkno), (char *)&idblk, (int)sblock->fs_bsize);
347 	if (ind_level <= 0) {
348 		for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) {
349 			if (sblock->fs_magic == FS_UFS1_MAGIC)
350 				blkno = idblk.ufs1[i];
351 			else
352 				blkno = idblk.ufs2[i];
353 			if (blkno != 0)
354 				ret |= searchdir(ino, blkno, sblock->fs_bsize,
355 					*filesize, tapesize, nodump, maxino);
356 			if (ret & HASDUMPEDFILE)
357 				*filesize = 0;
358 			else
359 				*filesize -= sblock->fs_bsize;
360 		}
361 		return (ret);
362 	}
363 	ind_level--;
364 	for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) {
365 		if (sblock->fs_magic == FS_UFS1_MAGIC)
366 			blkno = idblk.ufs1[i];
367 		else
368 			blkno = idblk.ufs2[i];
369 		if (blkno != 0)
370 			ret |= dirindir(ino, blkno, ind_level, filesize,
371 			    tapesize, nodump, maxino);
372 	}
373 	return (ret);
374 }
375 
376 /*
377  * Scan a disk block containing directory information looking to see if
378  * any of the entries are on the dump list and to see if the directory
379  * contains any subdirectories.
380  */
381 static int
382 searchdir(
383 	ino_t ino,
384 	ufs2_daddr_t blkno,
385 	long size,
386 	long filesize,
387 	long *tapesize,
388 	int nodump,
389 	ino_t maxino)
390 {
391 	int mode;
392 	struct direct *dp;
393 	union dinode *ip;
394 	long loc, ret = 0;
395 	static caddr_t dblk;
396 
397 	if (dblk == NULL && (dblk = malloc(sblock->fs_bsize)) == NULL)
398 		quit("searchdir: cannot allocate indirect memory.\n");
399 	blkread(fsbtodb(sblock, blkno), dblk, (int)size);
400 	if (filesize < size)
401 		size = filesize;
402 	for (loc = 0; loc < size; ) {
403 		dp = (struct direct *)(dblk + loc);
404 		if (dp->d_reclen == 0) {
405 			msg("corrupted directory, inumber %ju\n",
406 			    (uintmax_t)ino);
407 			break;
408 		}
409 		loc += dp->d_reclen;
410 		if (dp->d_ino == 0)
411 			continue;
412 		if (dp->d_ino >= maxino) {
413 			msg("corrupted directory entry, d_ino %ju >= %ju\n",
414 			    (uintmax_t)dp->d_ino, (uintmax_t)maxino);
415 			break;
416 		}
417 		if (dp->d_name[0] == '.') {
418 			if (dp->d_name[1] == '\0')
419 				continue;
420 			if (dp->d_name[1] == '.' && dp->d_name[2] == '\0')
421 				continue;
422 		}
423 		if (nodump) {
424 			ip = getino(dp->d_ino, &mode);
425 			if (TSTINO(dp->d_ino, dumpinomap)) {
426 				CLRINO(dp->d_ino, dumpinomap);
427 				*tapesize -= blockest(ip);
428 			}
429 			/*
430 			 * Add back to dumpdirmap and remove from usedinomap
431 			 * to propagate nodump.
432 			 */
433 			if (mode == IFDIR) {
434 				SETINO(dp->d_ino, dumpdirmap);
435 				CLRINO(dp->d_ino, usedinomap);
436 				ret |= HASSUBDIRS;
437 			}
438 		} else {
439 			if (TSTINO(dp->d_ino, dumpinomap)) {
440 				ret |= HASDUMPEDFILE;
441 				if (ret & HASSUBDIRS)
442 					break;
443 			}
444 			if (TSTINO(dp->d_ino, dumpdirmap)) {
445 				ret |= HASSUBDIRS;
446 				if (ret & HASDUMPEDFILE)
447 					break;
448 			}
449 		}
450 	}
451 	return (ret);
452 }
453 
454 /*
455  * Dump passes 3 and 4.
456  *
457  * Dump the contents of an inode to tape.
458  */
459 void
460 dumpino(union dinode *dp, ino_t ino)
461 {
462 	int ind_level, cnt, last, added;
463 	off_t size;
464 	char buf[TP_BSIZE];
465 
466 	if (newtape) {
467 		newtape = 0;
468 		dumpmap(dumpinomap, TS_BITS, ino);
469 	}
470 	CLRINO(ino, dumpinomap);
471 	/*
472 	 * Zero out the size of a snapshot so that it will be dumped
473 	 * as a zero length file.
474 	 */
475 	if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0) {
476 		DIP_SET(dp, di_size, 0);
477 		DIP_SET(dp, di_flags, DIP(dp, di_flags) & ~SF_SNAPSHOT);
478 	}
479 	if (sblock->fs_magic == FS_UFS1_MAGIC) {
480 		spcl.c_mode = dp->dp1.di_mode;
481 		spcl.c_size = dp->dp1.di_size;
482 		spcl.c_extsize = 0;
483 		spcl.c_atime = _time32_to_time(dp->dp1.di_atime);
484 		spcl.c_atimensec = dp->dp1.di_atimensec;
485 		spcl.c_mtime = _time32_to_time(dp->dp1.di_mtime);
486 		spcl.c_mtimensec = dp->dp1.di_mtimensec;
487 		spcl.c_birthtime = 0;
488 		spcl.c_birthtimensec = 0;
489 		spcl.c_rdev = dp->dp1.di_rdev;
490 		spcl.c_file_flags = dp->dp1.di_flags;
491 		spcl.c_uid = dp->dp1.di_uid;
492 		spcl.c_gid = dp->dp1.di_gid;
493 	} else {
494 		spcl.c_mode = dp->dp2.di_mode;
495 		spcl.c_size = dp->dp2.di_size;
496 		spcl.c_extsize = dp->dp2.di_extsize;
497 		spcl.c_atime = _time64_to_time(dp->dp2.di_atime);
498 		spcl.c_atimensec = dp->dp2.di_atimensec;
499 		spcl.c_mtime = _time64_to_time(dp->dp2.di_mtime);
500 		spcl.c_mtimensec = dp->dp2.di_mtimensec;
501 		spcl.c_birthtime = _time64_to_time(dp->dp2.di_birthtime);
502 		spcl.c_birthtimensec = dp->dp2.di_birthnsec;
503 		spcl.c_rdev = dp->dp2.di_rdev;
504 		spcl.c_file_flags = dp->dp2.di_flags;
505 		spcl.c_uid = dp->dp2.di_uid;
506 		spcl.c_gid = dp->dp2.di_gid;
507 	}
508 	spcl.c_type = TS_INODE;
509 	spcl.c_count = 0;
510 	switch (DIP(dp, di_mode) & S_IFMT) {
511 
512 	case 0:
513 		/*
514 		 * Freed inode.
515 		 */
516 		return;
517 
518 	case S_IFLNK:
519 		/*
520 		 * Check for short symbolic link.
521 		 */
522 		if (DIP(dp, di_size) > 0 &&
523 		    DIP(dp, di_size) < sblock->fs_maxsymlinklen) {
524 			spcl.c_addr[0] = 1;
525 			spcl.c_count = 1;
526 			added = appendextdata(dp);
527 			writeheader(ino);
528 			if (sblock->fs_magic == FS_UFS1_MAGIC)
529 				memmove(buf, (caddr_t)dp->dp1.di_db,
530 				    (u_long)DIP(dp, di_size));
531 			else
532 				memmove(buf, (caddr_t)dp->dp2.di_db,
533 				    (u_long)DIP(dp, di_size));
534 			buf[DIP(dp, di_size)] = '\0';
535 			writerec(buf, 0);
536 			writeextdata(dp, ino, added);
537 			return;
538 		}
539 		/* FALLTHROUGH */
540 
541 	case S_IFDIR:
542 	case S_IFREG:
543 		if (DIP(dp, di_size) > 0)
544 			break;
545 		/* FALLTHROUGH */
546 
547 	case S_IFIFO:
548 	case S_IFSOCK:
549 	case S_IFCHR:
550 	case S_IFBLK:
551 		added = appendextdata(dp);
552 		writeheader(ino);
553 		writeextdata(dp, ino, added);
554 		return;
555 
556 	default:
557 		msg("Warning: undefined file type 0%o\n",
558 		    DIP(dp, di_mode) & IFMT);
559 		return;
560 	}
561 	if (DIP(dp, di_size) > UFS_NDADDR * sblock->fs_bsize) {
562 		cnt = UFS_NDADDR * sblock->fs_frag;
563 		last = 0;
564 	} else {
565 		cnt = howmany(DIP(dp, di_size), sblock->fs_fsize);
566 		last = 1;
567 	}
568 	if (sblock->fs_magic == FS_UFS1_MAGIC)
569 		ufs1_blksout(&dp->dp1.di_db[0], cnt, ino);
570 	else
571 		ufs2_blksout(dp, &dp->dp2.di_db[0], cnt, ino, last);
572 	if ((size = DIP(dp, di_size) - UFS_NDADDR * sblock->fs_bsize) <= 0)
573 		return;
574 	for (ind_level = 0; ind_level < UFS_NIADDR; ind_level++) {
575 		dmpindir(dp, ino, DIP(dp, di_ib[ind_level]), ind_level, &size);
576 		if (size <= 0)
577 			return;
578 	}
579 }
580 
581 /*
582  * Read indirect blocks, and pass the data blocks to be dumped.
583  */
584 static void
585 dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int ind_level,
586 	off_t *size)
587 {
588 	union {
589 		ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)];
590 		ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)];
591 	} idblk;
592 	int i, cnt, last;
593 
594 	if (blk != 0)
595 		blkread(fsbtodb(sblock, blk), (char *)&idblk,
596 		    (int)sblock->fs_bsize);
597 	else
598 		memset(&idblk, 0, sblock->fs_bsize);
599 	if (ind_level <= 0) {
600 		if (*size > NINDIR(sblock) * sblock->fs_bsize) {
601 			cnt = NINDIR(sblock) * sblock->fs_frag;
602 			last = 0;
603 		} else {
604 			cnt = howmany(*size, sblock->fs_fsize);
605 			last = 1;
606 		}
607 		*size -= NINDIR(sblock) * sblock->fs_bsize;
608 		if (sblock->fs_magic == FS_UFS1_MAGIC)
609 			ufs1_blksout(idblk.ufs1, cnt, ino);
610 		else
611 			ufs2_blksout(dp, idblk.ufs2, cnt, ino, last);
612 		return;
613 	}
614 	ind_level--;
615 	for (i = 0; i < NINDIR(sblock); i++) {
616 		if (sblock->fs_magic == FS_UFS1_MAGIC)
617 			dmpindir(dp, ino, idblk.ufs1[i], ind_level, size);
618 		else
619 			dmpindir(dp, ino, idblk.ufs2[i], ind_level, size);
620 		if (*size <= 0)
621 			return;
622 	}
623 }
624 
625 /*
626  * Collect up the data into tape record sized buffers and output them.
627  */
628 static void
629 ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino)
630 {
631 	ufs1_daddr_t *bp;
632 	int i, j, count, blks, tbperdb;
633 
634 	blks = howmany(frags * sblock->fs_fsize, TP_BSIZE);
635 	tbperdb = sblock->fs_bsize >> tp_bshift;
636 	for (i = 0; i < blks; i += TP_NINDIR) {
637 		if (i + TP_NINDIR > blks)
638 			count = blks;
639 		else
640 			count = i + TP_NINDIR;
641 		assert(count <= TP_NINDIR + i);
642 		for (j = i; j < count; j++)
643 			if (blkp[j / tbperdb] != 0)
644 				spcl.c_addr[j - i] = 1;
645 			else
646 				spcl.c_addr[j - i] = 0;
647 		spcl.c_count = count - i;
648 		writeheader(ino);
649 		bp = &blkp[i / tbperdb];
650 		for (j = i; j < count; j += tbperdb, bp++)
651 			if (*bp != 0) {
652 				if (j + tbperdb <= count)
653 					dumpblock(*bp, (int)sblock->fs_bsize);
654 				else
655 					dumpblock(*bp, (count - j) * TP_BSIZE);
656 			}
657 		spcl.c_type = TS_ADDR;
658 	}
659 }
660 
661 /*
662  * Collect up the data into tape record sized buffers and output them.
663  */
664 static void
665 ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags, ino_t ino,
666 	int last)
667 {
668 	ufs2_daddr_t *bp;
669 	int i, j, count, resid, blks, tbperdb, added;
670 	static int writingextdata = 0;
671 
672 	/*
673 	 * Calculate the number of TP_BSIZE blocks to be dumped.
674 	 * For filesystems with a fragment size bigger than TP_BSIZE,
675 	 * only part of the final fragment may need to be dumped.
676 	 */
677 	blks = howmany(frags * sblock->fs_fsize, TP_BSIZE);
678 	if (last) {
679 		if (writingextdata)
680 			resid = howmany(fragoff(sblock, spcl.c_extsize),
681 			    TP_BSIZE);
682 		else
683 			resid = howmany(fragoff(sblock, dp->dp2.di_size),
684 			    TP_BSIZE);
685 		if (resid > 0)
686 			blks -= howmany(sblock->fs_fsize, TP_BSIZE) - resid;
687 	}
688 	tbperdb = sblock->fs_bsize >> tp_bshift;
689 	for (i = 0; i < blks; i += TP_NINDIR) {
690 		if (i + TP_NINDIR > blks)
691 			count = blks;
692 		else
693 			count = i + TP_NINDIR;
694 		assert(count <= TP_NINDIR + i);
695 		for (j = i; j < count; j++)
696 			if (blkp[j / tbperdb] != 0)
697 				spcl.c_addr[j - i] = 1;
698 			else
699 				spcl.c_addr[j - i] = 0;
700 		spcl.c_count = count - i;
701 		if (last && count == blks && !writingextdata)
702 			added = appendextdata(dp);
703 		writeheader(ino);
704 		bp = &blkp[i / tbperdb];
705 		for (j = i; j < count; j += tbperdb, bp++)
706 			if (*bp != 0) {
707 				if (j + tbperdb <= count)
708 					dumpblock(*bp, (int)sblock->fs_bsize);
709 				else
710 					dumpblock(*bp, (count - j) * TP_BSIZE);
711 			}
712 		spcl.c_type = TS_ADDR;
713 		spcl.c_count = 0;
714 		if (last && count == blks && !writingextdata) {
715 			writingextdata = 1;
716 			writeextdata(dp, ino, added);
717 			writingextdata = 0;
718 		}
719 	}
720 }
721 
722 /*
723  * If there is room in the current block for the extended attributes
724  * as well as the file data, update the header to reflect the added
725  * attribute data at the end. Attributes are placed at the end so that
726  * old versions of restore will correctly restore the file and simply
727  * discard the extra data at the end that it does not understand.
728  * The attribute data is dumped following the file data by the
729  * writeextdata() function (below).
730  */
731 static int
732 appendextdata(union dinode *dp)
733 {
734 	int i, blks, tbperdb;
735 
736 	/*
737 	 * If no extended attributes, there is nothing to do.
738 	 */
739 	if (spcl.c_extsize == 0)
740 		return (0);
741 	/*
742 	 * If there is not enough room at the end of this block
743 	 * to add the extended attributes, then rather than putting
744 	 * part of them here, we simply push them entirely into a
745 	 * new block rather than putting some here and some later.
746 	 */
747 	if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize)
748 		blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE);
749 	else
750 		blks = howmany(spcl.c_extsize, TP_BSIZE);
751 	if (spcl.c_count + blks > TP_NINDIR)
752 		return (0);
753 	/*
754 	 * Update the block map in the header to indicate the added
755 	 * extended attribute. They will be appended after the file
756 	 * data by the writeextdata() routine.
757 	 */
758 	tbperdb = sblock->fs_bsize >> tp_bshift;
759 	assert(spcl.c_count + blks <= TP_NINDIR);
760 	for (i = 0; i < blks; i++)
761 		if (&dp->dp2.di_extb[i / tbperdb] != 0)
762 				spcl.c_addr[spcl.c_count + i] = 1;
763 			else
764 				spcl.c_addr[spcl.c_count + i] = 0;
765 	spcl.c_count += blks;
766 	return (blks);
767 }
768 
769 /*
770  * Dump the extended attribute data. If there was room in the file
771  * header, then all we need to do is output the data blocks. If there
772  * was not room in the file header, then an additional TS_ADDR header
773  * is created to hold the attribute data.
774  */
775 static void
776 writeextdata(union dinode *dp, ino_t ino, int added)
777 {
778 	int i, frags, blks, tbperdb, last;
779 	ufs2_daddr_t *bp;
780 	off_t size;
781 
782 	/*
783 	 * If no extended attributes, there is nothing to do.
784 	 */
785 	if (spcl.c_extsize == 0)
786 		return;
787 	/*
788 	 * If there was no room in the file block for the attributes,
789 	 * dump them out in a new block, otherwise just dump the data.
790 	 */
791 	if (added == 0) {
792 		if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize) {
793 			frags = UFS_NXADDR * sblock->fs_frag;
794 			last = 0;
795 		} else {
796 			frags = howmany(spcl.c_extsize, sblock->fs_fsize);
797 			last = 1;
798 		}
799 		ufs2_blksout(dp, &dp->dp2.di_extb[0], frags, ino, last);
800 	} else {
801 		if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize)
802 			blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE);
803 		else
804 			blks = howmany(spcl.c_extsize, TP_BSIZE);
805 		tbperdb = sblock->fs_bsize >> tp_bshift;
806 		for (i = 0; i < blks; i += tbperdb) {
807 			bp = &dp->dp2.di_extb[i / tbperdb];
808 			if (*bp != 0) {
809 				if (i + tbperdb <= blks)
810 					dumpblock(*bp, (int)sblock->fs_bsize);
811 				else
812 					dumpblock(*bp, (blks - i) * TP_BSIZE);
813 			}
814 		}
815 
816 	}
817 	/*
818 	 * If an indirect block is added for extended attributes, then
819 	 * di_exti below should be changed to the structure element
820 	 * that references the extended attribute indirect block. This
821 	 * definition is here only to make it compile without complaint.
822 	 */
823 #define di_exti di_spare[0]
824 	/*
825 	 * If the extended attributes fall into an indirect block,
826 	 * dump it as well.
827 	 */
828 	if ((size = spcl.c_extsize - UFS_NXADDR * sblock->fs_bsize) > 0)
829 		dmpindir(dp, ino, dp->dp2.di_exti, 0, &size);
830 }
831 
832 /*
833  * Dump a map to the tape.
834  */
835 void
836 dumpmap(char *map, int type, ino_t ino)
837 {
838 	int i;
839 	char *cp;
840 
841 	spcl.c_type = type;
842 	spcl.c_count = howmany(mapsize * sizeof(char), TP_BSIZE);
843 	writeheader(ino);
844 	for (i = 0, cp = map; i < spcl.c_count; i++, cp += TP_BSIZE)
845 		writerec(cp, 0);
846 }
847 
848 /*
849  * Write a header record to the dump tape.
850  */
851 void
852 writeheader(ino_t ino)
853 {
854 	int32_t sum, cnt, *lp;
855 
856 	if (rsync_friendly >= 2) {
857 		/* don't track changes to access time */
858 		spcl.c_atime = spcl.c_mtime;
859 		spcl.c_atimensec = spcl.c_mtimensec;
860 	}
861 	spcl.c_inumber = ino;
862 	spcl.c_magic = FS_UFS2_MAGIC;
863 	spcl.c_checksum = 0;
864 	lp = (int32_t *)&spcl;
865 	sum = 0;
866 	cnt = sizeof(union u_spcl) / (4 * sizeof(int32_t));
867 	while (--cnt >= 0) {
868 		sum += *lp++;
869 		sum += *lp++;
870 		sum += *lp++;
871 		sum += *lp++;
872 	}
873 	spcl.c_checksum = CHECKSUM - sum;
874 	writerec((char *)&spcl, 1);
875 }
876 
877 union dinode *
878 getino(ino_t inum, int *modep)
879 {
880 	static ino_t minino, maxino;
881 	static caddr_t inoblock;
882 	struct ufs1_dinode *dp1;
883 	struct ufs2_dinode *dp2;
884 
885 	if (inoblock == NULL && (inoblock = malloc(sblock->fs_bsize)) == NULL)
886 		quit("cannot allocate inode memory.\n");
887 	curino = inum;
888 	if (inum >= minino && inum < maxino)
889 		goto gotit;
890 	blkread(fsbtodb(sblock, ino_to_fsba(sblock, inum)), inoblock,
891 	    (int)sblock->fs_bsize);
892 	minino = inum - (inum % INOPB(sblock));
893 	maxino = minino + INOPB(sblock);
894 gotit:
895 	if (sblock->fs_magic == FS_UFS1_MAGIC) {
896 		dp1 = &((struct ufs1_dinode *)inoblock)[inum - minino];
897 		*modep = (dp1->di_mode & IFMT);
898 		return ((union dinode *)dp1);
899 	}
900 	dp2 = &((struct ufs2_dinode *)inoblock)[inum - minino];
901 	*modep = (dp2->di_mode & IFMT);
902 	return ((union dinode *)dp2);
903 }
904 
905 /*
906  * Read a chunk of data from the disk.
907  * Try to recover from hard errors by reading in sector sized pieces.
908  * Error recovery is attempted at most BREADEMAX times before seeking
909  * consent from the operator to continue.
910  */
911 int	breaderrors = 0;
912 #define	BREADEMAX 32
913 
914 void
915 blkread(ufs2_daddr_t blkno, char *buf, int size)
916 {
917 	int secsize, bytes, resid, xfer, base, cnt, i;
918 	static char *tmpbuf;
919 	off_t offset;
920 
921 loop:
922 	offset = blkno << dev_bshift;
923 	secsize = sblock->fs_fsize;
924 	base = offset % secsize;
925 	resid = size % secsize;
926 	/*
927 	 * If the transfer request starts or ends on a non-sector
928 	 * boundary, we must read the entire sector and copy out
929 	 * just the part that we need.
930 	 */
931 	if (base == 0 && resid == 0) {
932 		cnt = cread(diskfd, buf, size, offset);
933 		if (cnt == size)
934 			return;
935 	} else {
936 		if (tmpbuf == NULL && (tmpbuf = malloc(secsize)) == NULL)
937 			quit("buffer malloc failed\n");
938 		xfer = 0;
939 		bytes = size;
940 		if (base != 0) {
941 			cnt = cread(diskfd, tmpbuf, secsize, offset - base);
942 			if (cnt != secsize)
943 				goto bad;
944 			xfer = MIN(secsize - base, size);
945 			offset += xfer;
946 			bytes -= xfer;
947 			resid = bytes % secsize;
948 			memcpy(buf, &tmpbuf[base], xfer);
949 		}
950 		if (bytes >= secsize) {
951 			cnt = cread(diskfd, &buf[xfer], bytes - resid, offset);
952 			if (cnt != bytes - resid)
953 				goto bad;
954 			xfer += cnt;
955 			offset += cnt;
956 		}
957 		if (resid == 0)
958 			return;
959 		cnt = cread(diskfd, tmpbuf, secsize, offset);
960 		if (cnt == secsize) {
961 			memcpy(&buf[xfer], tmpbuf, resid);
962 			return;
963 		}
964 	}
965 bad:
966 	if (blkno + (size / dev_bsize) > fsbtodb(sblock, sblock->fs_size)) {
967 		/*
968 		 * Trying to read the final fragment.
969 		 *
970 		 * NB - dump only works in TP_BSIZE blocks, hence
971 		 * rounds `dev_bsize' fragments up to TP_BSIZE pieces.
972 		 * It should be smarter about not actually trying to
973 		 * read more than it can get, but for the time being
974 		 * we punt and scale back the read only when it gets
975 		 * us into trouble. (mkm 9/25/83)
976 		 */
977 		size -= dev_bsize;
978 		goto loop;
979 	}
980 	if (cnt == -1)
981 		msg("read error from %s: %s: [block %jd]: count=%d\n",
982 			disk, strerror(errno), (intmax_t)blkno, size);
983 	else
984 		msg("short read error from %s: [block %jd]: count=%d, got=%d\n",
985 			disk, (intmax_t)blkno, size, cnt);
986 	if (++breaderrors > BREADEMAX) {
987 		msg("More than %d block read errors from %s\n",
988 			BREADEMAX, disk);
989 		broadcast("DUMP IS AILING!\n");
990 		msg("This is an unrecoverable error.\n");
991 		if (!query("Do you want to attempt to continue?")){
992 			dumpabort(0);
993 			/*NOTREACHED*/
994 		} else
995 			breaderrors = 0;
996 	}
997 	/*
998 	 * Zero buffer, then try to read each sector of buffer separately,
999 	 * and bypass the cache.
1000 	 */
1001 	memset(buf, 0, size);
1002 	for (i = 0; i < size; i += dev_bsize, buf += dev_bsize, blkno++) {
1003 		if ((cnt = pread(diskfd, buf, (int)dev_bsize,
1004 		    ((off_t)blkno << dev_bshift))) == dev_bsize)
1005 			continue;
1006 		if (cnt == -1) {
1007 			msg("read error from %s: %s: [sector %jd]: count=%ld\n",
1008 			    disk, strerror(errno), (intmax_t)blkno, dev_bsize);
1009 			continue;
1010 		}
1011 		msg("short read from %s: [sector %jd]: count=%ld, got=%d\n",
1012 		    disk, (intmax_t)blkno, dev_bsize, cnt);
1013 	}
1014 }
1015