xref: /freebsd/sbin/dump/traverse.c (revision 4f52dfbb)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1980, 1988, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifndef lint
33 #if 0
34 static char sccsid[] = "@(#)traverse.c	8.7 (Berkeley) 6/15/95";
35 #endif
36 static const char rcsid[] =
37   "$FreeBSD$";
38 #endif /* not lint */
39 
40 #include <sys/param.h>
41 #include <sys/stat.h>
42 
43 #include <ufs/ufs/dir.h>
44 #include <ufs/ufs/dinode.h>
45 #include <ufs/ffs/fs.h>
46 
47 #include <protocols/dumprestore.h>
48 
49 #include <ctype.h>
50 #include <errno.h>
51 #include <inttypes.h>
52 #include <limits.h>
53 #include <stdio.h>
54 #include <stdlib.h>
55 #include <string.h>
56 #include <timeconv.h>
57 #include <unistd.h>
58 
59 #include "dump.h"
60 
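/*
 * An on-disk inode in either the UFS1 or UFS2 format. The DIP() and
 * DIP_SET() macros below read and write a field of whichever variant
 * the filesystem uses, selected by the superblock magic number.
 */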
61 union dinode {
62 	struct ufs1_dinode dp1;
63 	struct ufs2_dinode dp2;
64 };
65 #define	DIP(dp, field) \
66 	((sblock->fs_magic == FS_UFS1_MAGIC) ? \
67 	(dp)->dp1.field : (dp)->dp2.field)
68 #define DIP_SET(dp, field, val) do {\
69 	if (sblock->fs_magic == FS_UFS1_MAGIC) \
70 		(dp)->dp1.field = (val); \
71 	else \
72 		(dp)->dp2.field = (val); \
73 	} while (0)
74 
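/*
 * Flags returned by searchdir() and dirindir() in pass 2 to report
 * whether a directory contains files marked for dumping and whether
 * it contains subdirectories.
 */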
75 #define	HASDUMPEDFILE	0x1
76 #define	HASSUBDIRS	0x2
77 
78 static	int dirindir(ino_t ino, ufs2_daddr_t blkno, int level, long *size,
79     long *tapesize, int nodump, ino_t maxino);
80 static	void dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int level,
81     off_t *size);
82 static	void ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino);
83 static	void ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags,
84     ino_t ino, int last);
85 static	int appendextdata(union dinode *dp);
86 static	void writeextdata(union dinode *dp, ino_t ino, int added);
87 static	int searchdir(ino_t ino, ufs2_daddr_t blkno, long size, long filesize,
88     long *tapesize, int nodump, ino_t maxino);
89 static	long blockest(union dinode *dp);
90 
91 /*
92  * This is an estimation of the number of TP_BSIZE blocks in the file.
93  * It estimates the number of blocks in files with holes by assuming
94  * that all of the blocks accounted for by di_blocks are data blocks
95  * (when some of the blocks are usually used for indirect pointers);
96  * hence the estimate may be high.
97  */
98 static long
99 blockest(union dinode *dp)
100 {
101 	long blkest, sizeest;
102 
103 	/*
104 	 * dp->di_size is the size of the file in bytes.
105 	 * dp->di_blocks stores the number of sectors actually in the file.
106 	 * If there are more sectors than the size would indicate, this just
107 	 *	means that there are indirect blocks in the file or unused
108 	 *	sectors in the last file block; we can safely ignore these
109 	 *	(blkest = sizeest below).
110 	 * If the file is bigger than the number of sectors would indicate,
111 	 *	then the file has holes in it.	In this case we must use the
112 	 *	block count to estimate the number of data blocks used, but
113 	 *	we use the actual size for estimating the number of indirect
114 	 *	dump blocks (sizeest vs. blkest in the indirect block
115 	 *	calculation).
116 	 */
117 	if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0)
118 		return (1);
119 	blkest = howmany(dbtob(DIP(dp, di_blocks)), TP_BSIZE);
120 	sizeest = howmany(DIP(dp, di_size), TP_BSIZE);
121 	if (blkest > sizeest)
122 		blkest = sizeest;
123 	if (DIP(dp, di_size) > sblock->fs_bsize * UFS_NDADDR) {
124 		/* calculate the number of indirect blocks on the dump tape */
125 		blkest += howmany(sizeest -
126 		    UFS_NDADDR * sblock->fs_bsize / TP_BSIZE, TP_NINDIR);
127 	}
128 	return (blkest + 1);
129 }
130 
131 /* Auxiliary macro to pick up files changed since previous dump. */
132 #define	CHANGEDSINCE(dp, t) \
133 	(DIP(dp, di_mtime) >= (t) || DIP(dp, di_ctime) >= (t))
134 
135 /* The WANTTODUMP macro decides whether a file should be dumped. */
136 #ifdef UF_NODUMP
137 #define	WANTTODUMP(dp) \
138 	(CHANGEDSINCE(dp, spcl.c_ddate) && \
139 	 (nonodump || (DIP(dp, di_flags) & UF_NODUMP) != UF_NODUMP))
140 #else
141 #define	WANTTODUMP(dp) CHANGEDSINCE(dp, spcl.c_ddate)
142 #endif
143 
144 /*
145  * Dump pass 1.
146  *
147  * Walk the inode list for a file system to find all allocated inodes
148  * that have been modified since the previous dump time. Also, find all
149  * the directories in the file system.
150  */
151 int
152 mapfiles(ino_t maxino, long *tapesize)
153 {
154 	int i, cg, mode, inosused;
155 	int anydirskipped = 0;
156 	union dinode *dp;
157 	struct cg *cgp;
158 	ino_t ino;
159 	u_char *cp;
160 
161 	if ((cgp = malloc(sblock->fs_cgsize)) == NULL)
162 		quit("mapfiles: cannot allocate memory.\n");
163 	for (cg = 0; cg < sblock->fs_ncg; cg++) {
164 		ino = cg * sblock->fs_ipg;
165 		blkread(fsbtodb(sblock, cgtod(sblock, cg)), (char *)cgp,
166 		    sblock->fs_cgsize);
167 		if (sblock->fs_magic == FS_UFS2_MAGIC)
168 			inosused = cgp->cg_initediblk;
169 		else
170 			inosused = sblock->fs_ipg;
171 		/*
172 		 * If we are using soft updates, then we can trust the
173 		 * cylinder group inode allocation maps to tell us which
174 		 * inodes are allocated. We will scan the used inode map
175 		 * to find the inodes that are really in use, and then
176 		 * read only those inodes in from disk.
177 		 */
178 		if (sblock->fs_flags & FS_DOSOFTDEP) {
179 			if (!cg_chkmagic(cgp))
180 				quit("mapfiles: cg %d: bad magic number\n", cg);
181 			cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
182 			for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
183 				if (*cp == 0)
184 					continue;
185 				for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
186 					if (*cp & i)
187 						break;
188 					inosused--;
189 				}
190 				break;
191 			}
192 			if (inosused <= 0)
193 				continue;
194 		}
195 		for (i = 0; i < inosused; i++, ino++) {
196 			if (ino < UFS_ROOTINO ||
197 			    (dp = getinode(ino, &mode)) == NULL ||
198 			    (mode & IFMT) == 0)
199 				continue;
200 			if (ino >= maxino) {
201 				msg("Skipping inode %ju >= maxino %ju\n",
202 				    (uintmax_t)ino, (uintmax_t)maxino);
203 				continue;
204 			}
205 			/*
206 			 * Everything must go in usedinomap so that a check
207 			 * for "in dumpdirmap but not in usedinomap" to detect
208 			 * dirs with nodump set has a chance of succeeding
209 			 * (this is used in mapdirs()).
210 			 */
211 			SETINO(ino, usedinomap);
212 			if (mode == IFDIR)
213 				SETINO(ino, dumpdirmap);
214 			if (WANTTODUMP(dp)) {
215 				SETINO(ino, dumpinomap);
216 				if (mode != IFREG &&
217 				    mode != IFDIR &&
218 				    mode != IFLNK)
219 					*tapesize += 1;
220 				else
221 					*tapesize += blockest(dp);
222 				continue;
223 			}
224 			if (mode == IFDIR) {
225 				if (!nonodump &&
226 				    (DIP(dp, di_flags) & UF_NODUMP))
227 					CLRINO(ino, usedinomap);
228 				anydirskipped = 1;
229 			}
230 		}
231 	}
232 	/*
233 	 * Restore gets very upset if the root is not dumped,
234 	 * so ensure that it always is dumped.
235 	 */
236 	SETINO(UFS_ROOTINO, dumpinomap);
237 	return (anydirskipped);
238 }
239 
240 /*
241  * Dump pass 2.
242  *
243  * Scan each directory on the file system to see if it has any modified
244  * files in it. If it does, and has not already been added to the dump
245  * list (because it was itself modified), then add it. If a directory
246  * has not been modified itself, contains no modified files and has no
247  * subdirectories, then it can be deleted from the dump list and from
248  * the list of directories. By deleting it from the list of directories,
249  * its parent may now qualify for the same treatment on this or a later
250  * pass using this algorithm.
251  */
252 int
253 mapdirs(ino_t maxino, long *tapesize)
254 {
255 	union dinode *dp;
256 	int i, isdir, nodump;
257 	char *map;
258 	ino_t ino;
259 	union dinode di;
260 	long filesize;
261 	int ret, change = 0;
262 
263 	isdir = 0;		/* XXX just to get gcc to shut up */
264 	for (map = dumpdirmap, ino = 1; ino < maxino; ino++) {
265 		if (((ino - 1) % CHAR_BIT) == 0)	/* map is offset by 1 */
266 			isdir = *map++;
267 		else
268 			isdir >>= 1;
269 		/*
270 		 * If a directory has been removed from usedinomap, it
271 		 * either has the nodump flag set, or has inherited
272 		 * it.  Although a directory can't be in dumpinomap if
273 		 * it isn't in usedinomap, we have to go through it to
274 		 * propagate the nodump flag.
275 		 */
276 		nodump = !nonodump && (TSTINO(ino, usedinomap) == 0);
277 		if ((isdir & 1) == 0 || (TSTINO(ino, dumpinomap) && !nodump))
278 			continue;
279 		dp = getinode(ino, &i);
280 		/*
281 		 * inode buf may change in searchdir().
282 		 */
283 		if (sblock->fs_magic == FS_UFS1_MAGIC)
284 			di.dp1 = dp->dp1;
285 		else
286 			di.dp2 = dp->dp2;
287 		filesize = DIP(&di, di_size);
288 		for (ret = 0, i = 0; filesize > 0 && i < UFS_NDADDR; i++) {
289 			if (DIP(&di, di_db[i]) != 0)
290 				ret |= searchdir(ino, DIP(&di, di_db[i]),
291 				    (long)sblksize(sblock, DIP(&di, di_size),
292 				    i), filesize, tapesize, nodump, maxino);
293 			if (ret & HASDUMPEDFILE)
294 				filesize = 0;
295 			else
296 				filesize -= sblock->fs_bsize;
297 		}
298 		for (i = 0; filesize > 0 && i < UFS_NIADDR; i++) {
299 			if (DIP(&di, di_ib[i]) == 0)
300 				continue;
301 			ret |= dirindir(ino, DIP(&di, di_ib[i]), i, &filesize,
302 			    tapesize, nodump, maxino);
303 		}
304 		if (ret & HASDUMPEDFILE) {
305 			SETINO(ino, dumpinomap);
306 			*tapesize += blockest(&di);
307 			change = 1;
308 			continue;
309 		}
310 		if (nodump) {
311 			if (ret & HASSUBDIRS)
312 				change = 1;	/* subdirs inherit nodump */
313 			CLRINO(ino, dumpdirmap);
314 		} else if ((ret & HASSUBDIRS) == 0)
315 			if (!TSTINO(ino, dumpinomap)) {
316 				CLRINO(ino, dumpdirmap);
317 				change = 1;
318 			}
319 	}
320 	return (change);
321 }
322 
323 /*
324  * Read indirect blocks, and pass the data blocks to be searched
325  * as directories. Quit as soon as any entry is found that will
326  * require the directory to be dumped.
327  */
328 static int
329 dirindir(
330 	ino_t ino,
331 	ufs2_daddr_t blkno,
332 	int ind_level,
333 	long *filesize,
334 	long *tapesize,
335 	int nodump,
336 	ino_t maxino)
337 {
338 	union {
339 		ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)];
340 		ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)];
341 	} idblk;
342 	int ret = 0;
343 	int i;
344 
345 	blkread(fsbtodb(sblock, blkno), (char *)&idblk, (int)sblock->fs_bsize);
346 	if (ind_level <= 0) {
347 		for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) {
348 			if (sblock->fs_magic == FS_UFS1_MAGIC)
349 				blkno = idblk.ufs1[i];
350 			else
351 				blkno = idblk.ufs2[i];
352 			if (blkno != 0)
353 				ret |= searchdir(ino, blkno, sblock->fs_bsize,
354 					*filesize, tapesize, nodump, maxino);
355 			if (ret & HASDUMPEDFILE)
356 				*filesize = 0;
357 			else
358 				*filesize -= sblock->fs_bsize;
359 		}
360 		return (ret);
361 	}
362 	ind_level--;
363 	for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) {
364 		if (sblock->fs_magic == FS_UFS1_MAGIC)
365 			blkno = idblk.ufs1[i];
366 		else
367 			blkno = idblk.ufs2[i];
368 		if (blkno != 0)
369 			ret |= dirindir(ino, blkno, ind_level, filesize,
370 			    tapesize, nodump, maxino);
371 	}
372 	return (ret);
373 }
374 
375 /*
376  * Scan a disk block containing directory information looking to see if
377  * any of the entries are on the dump list and to see if the directory
378  * contains any subdirectories.
379  */
380 static int
381 searchdir(
382 	ino_t ino,
383 	ufs2_daddr_t blkno,
384 	long size,
385 	long filesize,
386 	long *tapesize,
387 	int nodump,
388 	ino_t maxino)
389 {
390 	int mode;
391 	struct direct *dp;
392 	union dinode *ip;
393 	long loc, ret = 0;
394 	static caddr_t dblk;
395 
396 	if (dblk == NULL && (dblk = malloc(sblock->fs_bsize)) == NULL)
397 		quit("searchdir: cannot allocate indirect memory.\n");
398 	blkread(fsbtodb(sblock, blkno), dblk, (int)size);
399 	if (filesize < size)
400 		size = filesize;
401 	for (loc = 0; loc < size; ) {
402 		dp = (struct direct *)(dblk + loc);
403 		if (dp->d_reclen == 0) {
404 			msg("corrupted directory, inumber %ju\n",
405 			    (uintmax_t)ino);
406 			break;
407 		}
408 		loc += dp->d_reclen;
409 		if (dp->d_ino == 0)
410 			continue;
411 		if (dp->d_ino >= maxino) {
412 			msg("corrupted directory entry, d_ino %ju >= %ju\n",
413 			    (uintmax_t)dp->d_ino, (uintmax_t)maxino);
414 			break;
415 		}
416 		if (dp->d_name[0] == '.') {
417 			if (dp->d_name[1] == '\0')
418 				continue;
419 			if (dp->d_name[1] == '.' && dp->d_name[2] == '\0')
420 				continue;
421 		}
422 		if (nodump) {
423 			ip = getinode(dp->d_ino, &mode);
424 			if (TSTINO(dp->d_ino, dumpinomap)) {
425 				CLRINO(dp->d_ino, dumpinomap);
426 				*tapesize -= blockest(ip);
427 			}
428 			/*
429 			 * Add back to dumpdirmap and remove from usedinomap
430 			 * to propagate nodump.
431 			 */
432 			if (mode == IFDIR) {
433 				SETINO(dp->d_ino, dumpdirmap);
434 				CLRINO(dp->d_ino, usedinomap);
435 				ret |= HASSUBDIRS;
436 			}
437 		} else {
438 			if (TSTINO(dp->d_ino, dumpinomap)) {
439 				ret |= HASDUMPEDFILE;
440 				if (ret & HASSUBDIRS)
441 					break;
442 			}
443 			if (TSTINO(dp->d_ino, dumpdirmap)) {
444 				ret |= HASSUBDIRS;
445 				if (ret & HASDUMPEDFILE)
446 					break;
447 			}
448 		}
449 	}
450 	return (ret);
451 }
452 
453 /*
454  * Dump passes 3 and 4.
455  *
456  * Dump the contents of an inode to tape.
457  */
458 void
459 dumpino(union dinode *dp, ino_t ino)
460 {
461 	int ind_level, cnt, last, added;
462 	off_t size;
463 	char buf[TP_BSIZE];
464 
465 	if (newtape) {
466 		newtape = 0;
467 		dumpmap(dumpinomap, TS_BITS, ino);
468 	}
469 	CLRINO(ino, dumpinomap);
470 	/*
471 	 * Zero out the size of a snapshot so that it will be dumped
472 	 * as a zero length file.
473 	 */
474 	if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0) {
475 		DIP_SET(dp, di_size, 0);
476 		DIP_SET(dp, di_flags, DIP(dp, di_flags) & ~SF_SNAPSHOT);
477 	}
478 	if (sblock->fs_magic == FS_UFS1_MAGIC) {
479 		spcl.c_mode = dp->dp1.di_mode;
480 		spcl.c_size = dp->dp1.di_size;
481 		spcl.c_extsize = 0;
482 		spcl.c_atime = _time32_to_time(dp->dp1.di_atime);
483 		spcl.c_atimensec = dp->dp1.di_atimensec;
484 		spcl.c_mtime = _time32_to_time(dp->dp1.di_mtime);
485 		spcl.c_mtimensec = dp->dp1.di_mtimensec;
486 		spcl.c_birthtime = 0;
487 		spcl.c_birthtimensec = 0;
488 		spcl.c_rdev = dp->dp1.di_rdev;
489 		spcl.c_file_flags = dp->dp1.di_flags;
490 		spcl.c_uid = dp->dp1.di_uid;
491 		spcl.c_gid = dp->dp1.di_gid;
492 	} else {
493 		spcl.c_mode = dp->dp2.di_mode;
494 		spcl.c_size = dp->dp2.di_size;
495 		spcl.c_extsize = dp->dp2.di_extsize;
496 		spcl.c_atime = _time64_to_time(dp->dp2.di_atime);
497 		spcl.c_atimensec = dp->dp2.di_atimensec;
498 		spcl.c_mtime = _time64_to_time(dp->dp2.di_mtime);
499 		spcl.c_mtimensec = dp->dp2.di_mtimensec;
500 		spcl.c_birthtime = _time64_to_time(dp->dp2.di_birthtime);
501 		spcl.c_birthtimensec = dp->dp2.di_birthnsec;
502 		spcl.c_rdev = dp->dp2.di_rdev;
503 		spcl.c_file_flags = dp->dp2.di_flags;
504 		spcl.c_uid = dp->dp2.di_uid;
505 		spcl.c_gid = dp->dp2.di_gid;
506 	}
507 	spcl.c_type = TS_INODE;
508 	spcl.c_count = 0;
509 	switch (DIP(dp, di_mode) & S_IFMT) {
510 
511 	case 0:
512 		/*
513 		 * Freed inode.
514 		 */
515 		return;
516 
517 	case S_IFLNK:
518 		/*
519 		 * Check for short symbolic link.
520 		 */
521 		if (DIP(dp, di_size) > 0 &&
522 		    DIP(dp, di_size) < sblock->fs_maxsymlinklen) {
523 			spcl.c_addr[0] = 1;
524 			spcl.c_count = 1;
525 			added = appendextdata(dp);
526 			writeheader(ino);
527 			if (sblock->fs_magic == FS_UFS1_MAGIC)
528 				memmove(buf, (caddr_t)dp->dp1.di_db,
529 				    (u_long)DIP(dp, di_size));
530 			else
531 				memmove(buf, (caddr_t)dp->dp2.di_db,
532 				    (u_long)DIP(dp, di_size));
533 			buf[DIP(dp, di_size)] = '\0';
534 			writerec(buf, 0);
535 			writeextdata(dp, ino, added);
536 			return;
537 		}
538 		/* FALLTHROUGH */
539 
540 	case S_IFDIR:
541 	case S_IFREG:
542 		if (DIP(dp, di_size) > 0)
543 			break;
544 		/* FALLTHROUGH */
545 
546 	case S_IFIFO:
547 	case S_IFSOCK:
548 	case S_IFCHR:
549 	case S_IFBLK:
550 		added = appendextdata(dp);
551 		writeheader(ino);
552 		writeextdata(dp, ino, added);
553 		return;
554 
555 	default:
556 		msg("Warning: undefined file type 0%o\n",
557 		    DIP(dp, di_mode) & IFMT);
558 		return;
559 	}
560 	if (DIP(dp, di_size) > UFS_NDADDR * sblock->fs_bsize) {
561 		cnt = UFS_NDADDR * sblock->fs_frag;
562 		last = 0;
563 	} else {
564 		cnt = howmany(DIP(dp, di_size), sblock->fs_fsize);
565 		last = 1;
566 	}
567 	if (sblock->fs_magic == FS_UFS1_MAGIC)
568 		ufs1_blksout(&dp->dp1.di_db[0], cnt, ino);
569 	else
570 		ufs2_blksout(dp, &dp->dp2.di_db[0], cnt, ino, last);
571 	if ((size = DIP(dp, di_size) - UFS_NDADDR * sblock->fs_bsize) <= 0)
572 		return;
573 	for (ind_level = 0; ind_level < UFS_NIADDR; ind_level++) {
574 		dmpindir(dp, ino, DIP(dp, di_ib[ind_level]), ind_level, &size);
575 		if (size <= 0)
576 			return;
577 	}
578 }
579 
580 /*
581  * Read indirect blocks, and pass the data blocks to be dumped.
582  */
583 static void
584 dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int ind_level,
585 	off_t *size)
586 {
587 	union {
588 		ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)];
589 		ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)];
590 	} idblk;
591 	int i, cnt, last;
592 
593 	if (blk != 0)
594 		blkread(fsbtodb(sblock, blk), (char *)&idblk,
595 		    (int)sblock->fs_bsize);
596 	else
597 		memset(&idblk, 0, sblock->fs_bsize);
598 	if (ind_level <= 0) {
599 		if (*size > NINDIR(sblock) * sblock->fs_bsize) {
600 			cnt = NINDIR(sblock) * sblock->fs_frag;
601 			last = 0;
602 		} else {
603 			cnt = howmany(*size, sblock->fs_fsize);
604 			last = 1;
605 		}
606 		*size -= NINDIR(sblock) * sblock->fs_bsize;
607 		if (sblock->fs_magic == FS_UFS1_MAGIC)
608 			ufs1_blksout(idblk.ufs1, cnt, ino);
609 		else
610 			ufs2_blksout(dp, idblk.ufs2, cnt, ino, last);
611 		return;
612 	}
613 	ind_level--;
614 	for (i = 0; i < NINDIR(sblock); i++) {
615 		if (sblock->fs_magic == FS_UFS1_MAGIC)
616 			dmpindir(dp, ino, idblk.ufs1[i], ind_level, size);
617 		else
618 			dmpindir(dp, ino, idblk.ufs2[i], ind_level, size);
619 		if (*size <= 0)
620 			return;
621 	}
622 }
623 
624 /*
625  * Collect up the data into tape record sized buffers and output them.
626  */
627 static void
628 ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino)
629 {
630 	ufs1_daddr_t *bp;
631 	int i, j, count, blks, tbperdb;
632 
633 	blks = howmany(frags * sblock->fs_fsize, TP_BSIZE);
634 	tbperdb = sblock->fs_bsize >> tp_bshift;
635 	for (i = 0; i < blks; i += TP_NINDIR) {
636 		if (i + TP_NINDIR > blks)
637 			count = blks;
638 		else
639 			count = i + TP_NINDIR;
640 		for (j = i; j < count; j++)
641 			if (blkp[j / tbperdb] != 0)
642 				spcl.c_addr[j - i] = 1;
643 			else
644 				spcl.c_addr[j - i] = 0;
645 		spcl.c_count = count - i;
646 		writeheader(ino);
647 		bp = &blkp[i / tbperdb];
648 		for (j = i; j < count; j += tbperdb, bp++)
649 			if (*bp != 0) {
650 				if (j + tbperdb <= count)
651 					dumpblock(*bp, (int)sblock->fs_bsize);
652 				else
653 					dumpblock(*bp, (count - j) * TP_BSIZE);
654 			}
655 		spcl.c_type = TS_ADDR;
656 	}
657 }
658 
659 /*
660  * Collect up the data into tape record sized buffers and output them.
661  */
662 static void
663 ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags, ino_t ino,
664 	int last)
665 {
666 	ufs2_daddr_t *bp;
667 	int i, j, count, resid, blks, tbperdb, added;
668 	static int writingextdata = 0;
669 
670 	/*
671 	 * Calculate the number of TP_BSIZE blocks to be dumped.
672 	 * For filesystems with a fragment size bigger than TP_BSIZE,
673 	 * only part of the final fragment may need to be dumped.
674 	 */
675 	blks = howmany(frags * sblock->fs_fsize, TP_BSIZE);
676 	if (last) {
677 		if (writingextdata)
678 			resid = howmany(fragoff(sblock, spcl.c_extsize),
679 			    TP_BSIZE);
680 		else
681 			resid = howmany(fragoff(sblock, dp->dp2.di_size),
682 			    TP_BSIZE);
683 		if (resid > 0)
684 			blks -= howmany(sblock->fs_fsize, TP_BSIZE) - resid;
685 	}
686 	tbperdb = sblock->fs_bsize >> tp_bshift;
687 	for (i = 0; i < blks; i += TP_NINDIR) {
688 		if (i + TP_NINDIR > blks)
689 			count = blks;
690 		else
691 			count = i + TP_NINDIR;
692 		for (j = i; j < count; j++)
693 			if (blkp[j / tbperdb] != 0)
694 				spcl.c_addr[j - i] = 1;
695 			else
696 				spcl.c_addr[j - i] = 0;
697 		spcl.c_count = count - i;
698 		if (last && count == blks && !writingextdata)
699 			added = appendextdata(dp);
700 		writeheader(ino);
701 		bp = &blkp[i / tbperdb];
702 		for (j = i; j < count; j += tbperdb, bp++)
703 			if (*bp != 0) {
704 				if (j + tbperdb <= count)
705 					dumpblock(*bp, (int)sblock->fs_bsize);
706 				else
707 					dumpblock(*bp, (count - j) * TP_BSIZE);
708 			}
709 		spcl.c_type = TS_ADDR;
710 		spcl.c_count = 0;
711 		if (last && count == blks && !writingextdata) {
712 			writingextdata = 1;
713 			writeextdata(dp, ino, added);
714 			writingextdata = 0;
715 		}
716 	}
717 }
718 
719 /*
720  * If there is room in the current block for the extended attributes
721  * as well as the file data, update the header to reflect the added
722  * attribute data at the end. Attributes are placed at the end so that
723  * old versions of restore will correctly restore the file and simply
724  * discard the extra data at the end that it does not understand.
725  * The attribute data is dumped following the file data by the
726  * writeextdata() function (below).
727  */
728 static int
729 appendextdata(union dinode *dp)
730 {
731 	int i, blks, tbperdb;
732 
733 	/*
734 	 * If no extended attributes, there is nothing to do.
735 	 */
736 	if (spcl.c_extsize == 0)
737 		return (0);
738 	/*
739 	 * If there is not enough room at the end of this block
740 	 * to add all of the extended attributes, we push them entirely
741 	 * into a new block rather than splitting them between this
742 	 * block and a later one.
743 	 */
744 	if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize)
745 		blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE);
746 	else
747 		blks = howmany(spcl.c_extsize, TP_BSIZE);
748 	if (spcl.c_count + blks > TP_NINDIR)
749 		return (0);
750 	/*
751 	 * Update the block map in the header to indicate the added
752 	 * extended attributes. They will be appended after the file
753 	 * data by the writeextdata() routine.
754 	 */
755 	tbperdb = sblock->fs_bsize >> tp_bshift;
756 	for (i = 0; i < blks; i++)
757 		if (dp->dp2.di_extb[i / tbperdb] != 0)
758 			spcl.c_addr[spcl.c_count + i] = 1;
759 		else
760 			spcl.c_addr[spcl.c_count + i] = 0;
761 	spcl.c_count += blks;
762 	return (blks);
763 }
764 
765 /*
766  * Dump the extended attribute data. If there was room in the file
767  * header, then all we need to do is output the data blocks. If there
768  * was not room in the file header, then an additional TS_ADDR header
769  * is created to hold the attribute data.
770  */
771 static void
772 writeextdata(union dinode *dp, ino_t ino, int added)
773 {
774 	int i, frags, blks, tbperdb, last;
775 	ufs2_daddr_t *bp;
776 	off_t size;
777 
778 	/*
779 	 * If no extended attributes, there is nothing to do.
780 	 */
781 	if (spcl.c_extsize == 0)
782 		return;
783 	/*
784 	 * If there was no room in the file block for the attributes,
785 	 * dump them out in a new block, otherwise just dump the data.
786 	 */
787 	if (added == 0) {
788 		if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize) {
789 			frags = UFS_NXADDR * sblock->fs_frag;
790 			last = 0;
791 		} else {
792 			frags = howmany(spcl.c_extsize, sblock->fs_fsize);
793 			last = 1;
794 		}
795 		ufs2_blksout(dp, &dp->dp2.di_extb[0], frags, ino, last);
796 	} else {
797 		if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize)
798 			blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE);
799 		else
800 			blks = howmany(spcl.c_extsize, TP_BSIZE);
801 		tbperdb = sblock->fs_bsize >> tp_bshift;
802 		for (i = 0; i < blks; i += tbperdb) {
803 			bp = &dp->dp2.di_extb[i / tbperdb];
804 			if (*bp != 0) {
805 				if (i + tbperdb <= blks)
806 					dumpblock(*bp, (int)sblock->fs_bsize);
807 				else
808 					dumpblock(*bp, (blks - i) * TP_BSIZE);
809 			}
810 		}
811 
812 	}
813 	/*
814 	 * If an indirect block is added for extended attributes, then
815 	 * di_exti below should be changed to the structure element
816 	 * that references the extended attribute indirect block. This
817 	 * definition is here only to make it compile without complaint.
818 	 */
819 #define di_exti di_spare[0]
820 	/*
821 	 * If the extended attributes fall into an indirect block,
822 	 * dump it as well.
823 	 */
824 	if ((size = spcl.c_extsize - UFS_NXADDR * sblock->fs_bsize) > 0)
825 		dmpindir(dp, ino, dp->dp2.di_exti, 0, &size);
826 }
827 
828 /*
829  * Dump a map to the tape.
830  */
831 void
832 dumpmap(char *map, int type, ino_t ino)
833 {
834 	int i;
835 	char *cp;
836 
837 	spcl.c_type = type;
838 	spcl.c_count = howmany(mapsize * sizeof(char), TP_BSIZE);
839 	writeheader(ino);
840 	for (i = 0, cp = map; i < spcl.c_count; i++, cp += TP_BSIZE)
841 		writerec(cp, 0);
842 }
843 
844 /*
845  * Write a header record to the dump tape.
846  */
847 void
848 writeheader(ino_t ino)
849 {
850 	int32_t sum, cnt, *lp;
851 
852 	if (rsync_friendly >= 2) {
853 		/* don't track changes to access time */
854 		spcl.c_atime = spcl.c_mtime;
855 		spcl.c_atimensec = spcl.c_mtimensec;
856 	}
857 	spcl.c_inumber = ino;
858 	spcl.c_magic = FS_UFS2_MAGIC;
859 	spcl.c_checksum = 0;
860 	lp = (int32_t *)&spcl;
861 	sum = 0;
862 	cnt = sizeof(union u_spcl) / (4 * sizeof(int32_t));
863 	while (--cnt >= 0) {
864 		sum += *lp++;
865 		sum += *lp++;
866 		sum += *lp++;
867 		sum += *lp++;
868 	}
869 	spcl.c_checksum = CHECKSUM - sum;
870 	writerec((char *)&spcl, 1);
871 }
872 
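/*
 * Return a pointer to the on-disk inode for inum, reading the block of
 * inodes that contains it from disk if it is not already cached. The
 * file type bits (IFMT) of the inode's mode are returned via modep.
 */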
873 union dinode *
874 getinode(ino_t inum, int *modep)
875 {
876 	static ino_t minino, maxino;
877 	static caddr_t inoblock;
878 	struct ufs1_dinode *dp1;
879 	struct ufs2_dinode *dp2;
880 
881 	if (inoblock == NULL && (inoblock = malloc(sblock->fs_bsize)) == NULL)
882 		quit("cannot allocate inode memory.\n");
883 	curino = inum;
884 	if (inum >= minino && inum < maxino)
885 		goto gotit;
886 	blkread(fsbtodb(sblock, ino_to_fsba(sblock, inum)), inoblock,
887 	    (int)sblock->fs_bsize);
888 	minino = inum - (inum % INOPB(sblock));
889 	maxino = minino + INOPB(sblock);
890 gotit:
891 	if (sblock->fs_magic == FS_UFS1_MAGIC) {
892 		dp1 = &((struct ufs1_dinode *)inoblock)[inum - minino];
893 		*modep = (dp1->di_mode & IFMT);
894 		return ((union dinode *)dp1);
895 	}
896 	dp2 = &((struct ufs2_dinode *)inoblock)[inum - minino];
897 	*modep = (dp2->di_mode & IFMT);
898 	return ((union dinode *)dp2);
899 }
900 
901 /*
902  * Read a chunk of data from the disk.
903  * Try to recover from hard errors by reading in sector sized pieces.
904  * Error recovery is attempted at most BREADEMAX times before seeking
905  * consent from the operator to continue.
906  */
907 int	breaderrors = 0;
908 #define	BREADEMAX 32
909 
910 void
911 blkread(ufs2_daddr_t blkno, char *buf, int size)
912 {
913 	int secsize, bytes, resid, xfer, base, cnt, i;
914 	static char *tmpbuf;
915 	off_t offset;
916 
917 loop:
918 	offset = blkno << dev_bshift;
919 	secsize = sblock->fs_fsize;
920 	base = offset % secsize;
921 	resid = size % secsize;
922 	/*
923 	 * If the transfer request starts or ends on a non-sector
924 	 * boundary, we must read the entire sector and copy out
925 	 * just the part that we need.
926 	 */
927 	if (base == 0 && resid == 0) {
928 		cnt = cread(diskfd, buf, size, offset);
929 		if (cnt == size)
930 			return;
931 	} else {
932 		if (tmpbuf == NULL && (tmpbuf = malloc(secsize)) == NULL)
933 			quit("buffer malloc failed\n");
934 		xfer = 0;
935 		bytes = size;
936 		if (base != 0) {
937 			cnt = cread(diskfd, tmpbuf, secsize, offset - base);
938 			if (cnt != secsize)
939 				goto bad;
940 			xfer = MIN(secsize - base, size);
941 			offset += xfer;
942 			bytes -= xfer;
943 			resid = bytes % secsize;
944 			memcpy(buf, &tmpbuf[base], xfer);
945 		}
946 		if (bytes >= secsize) {
947 			cnt = cread(diskfd, &buf[xfer], bytes - resid, offset);
948 			if (cnt != bytes - resid)
949 				goto bad;
950 			xfer += cnt;
951 			offset += cnt;
952 		}
953 		if (resid == 0)
954 			return;
955 		cnt = cread(diskfd, tmpbuf, secsize, offset);
956 		if (cnt == secsize) {
957 			memcpy(&buf[xfer], tmpbuf, resid);
958 			return;
959 		}
960 	}
961 bad:
962 	if (blkno + (size / dev_bsize) > fsbtodb(sblock, sblock->fs_size)) {
963 		/*
964 		 * Trying to read the final fragment.
965 		 *
966 		 * NB - dump only works in TP_BSIZE blocks, hence
967 		 * rounds `dev_bsize' fragments up to TP_BSIZE pieces.
968 		 * It should be smarter about not actually trying to
969 		 * read more than it can get, but for the time being
970 		 * we punt and scale back the read only when it gets
971 		 * us into trouble. (mkm 9/25/83)
972 		 */
973 		size -= dev_bsize;
974 		goto loop;
975 	}
976 	if (cnt == -1)
977 		msg("read error from %s: %s: [block %jd]: count=%d\n",
978 			disk, strerror(errno), (intmax_t)blkno, size);
979 	else
980 		msg("short read error from %s: [block %jd]: count=%d, got=%d\n",
981 			disk, (intmax_t)blkno, size, cnt);
982 	if (++breaderrors > BREADEMAX) {
983 		msg("More than %d block read errors from %s\n",
984 			BREADEMAX, disk);
985 		broadcast("DUMP IS AILING!\n");
986 		msg("This is an unrecoverable error.\n");
987 		if (!query("Do you want to attempt to continue?")){
988 			dumpabort(0);
989 			/*NOTREACHED*/
990 		} else
991 			breaderrors = 0;
992 	}
993 	/*
994 	 * Zero buffer, then try to read each sector of buffer separately,
995 	 * and bypass the cache.
996 	 */
997 	memset(buf, 0, size);
998 	for (i = 0; i < size; i += dev_bsize, buf += dev_bsize, blkno++) {
999 		if ((cnt = pread(diskfd, buf, (int)dev_bsize,
1000 		    ((off_t)blkno << dev_bshift))) == dev_bsize)
1001 			continue;
1002 		if (cnt == -1) {
1003 			msg("read error from %s: %s: [sector %jd]: count=%ld\n",
1004 			    disk, strerror(errno), (intmax_t)blkno, dev_bsize);
1005 			continue;
1006 		}
1007 		msg("short read from %s: [sector %jd]: count=%ld, got=%d\n",
1008 		    disk, (intmax_t)blkno, dev_bsize, cnt);
1009 	}
1010 }
1011