/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);
static int	ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int	ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};
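
/*
 * These buf_ops are installed on the device vnode's buffer object
 * (devvp->v_bufobj.bo_ops = &ffs_ops in ffs_mountfs() below), so writes
 * of filesystem buffers to the underlying device are routed through
 * ffs_bufwrite() and ffs_geom_strategy().
 */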

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");
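
/*
 * The knob above is exported as the vfs.ffs.enxio_enable sysctl; being
 * CTLFLAG_RWTUN it may also be preset from loader(8), e.g. (illustrative)
 * "sysctl vfs.ffs.enxio_enable=0" at runtime to stop remapping other disk
 * I/O errors to ENXIO.
 */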

/*
 * Return a buffer with the contents of block "offset" from the beginning of
 * directory "vp".  If "res" is non-NULL, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}
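
/*
 * ffs_blkatoff() is not called directly by the generic UFS code; it is
 * exported through the ump->um_blkatoff hook set up in ffs_mountfs()
 * below.
 */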

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block. Also check
	 * that it does not point to an inode block or a superblock. Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, (intmax_t)inum,
		    (intmax_t)daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}
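
/*
 * An illustrative reading of the acceptance test above: a block in
 * cylinder group cg passes either when it lies wholly before the group's
 * backup superblock at cgsblock(fs, cg) (only for cg > 0, since that
 * space in cg 0 holds the bootstrap code), or when it lies in the data
 * area between cgdmin(fs, cg) and the group's end at
 * cgbase(fs, cg) + fs_fpg.  Anything else -- superblocks, inode blocks,
 * or out-of-range addresses -- earns a rate-limited complaint and
 * EINTEGRITY.
 */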

/*
 * On first ENXIO error, initiate an asynchronous forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		vfs_ref(ump->um_mountp);
		dounmount(ump->um_mountp,
		    MNT_FORCE | MNT_RECURSE | MNT_DEFERRED, curthread);
		printf("UFS: forcibly unmounting %s from %s\n",
		    ump->um_mountp->mnt_stat.f_mntfromname,
		    ump->um_mountp->mnt_stat.f_mntonname);
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}
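
/*
 * The caller idiom used throughout this file (see ffs_load_inode() above
 * and ffs_unmount() below) is:
 *
 *	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
 *		return (error);
 *
 * i.e. the error is swallowed once a forced unmount is already tearing
 * the filesystem down.
 */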

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);

	/*
	 * If this is a snapshot request, take the snapshot.
	 */
	if (mp->mnt_flag & MNT_SNAPSHOT)
		return (ffs_snapshot(mp, fspec));

	/*
	 * Must not call namei() while owning busy ref.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		vfs_unbusy(mp);

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set.  Mount
		 * update busies mp before setting MNT_UPDATE.  We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE_PNBUF(&ndp);
	if (!vn_isdisk_error(ndp.ni_vp, &error)) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(ndp.ni_vp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(ndp.ni_vp);
		return (error);
	}

	/*
	 * New mount
	 *
	 * We need the name for the mount point (also used for
	 * "last mounted on") copied in. If an error occurs,
	 * the mount point is discarded by the upper level code.
	 * Note that vfs_mount_alloc() populates f_mntonname for us.
	 */
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		if ((error = ffs_mountfs(ndp.ni_vp, mp, td)) != 0) {
			vrele(ndp.ni_vp);
			return (error);
		}
	} else {
		/*
		 * When updating, check whether changing from read-only to
		 * read/write; if there is no device name, that's all we do.
		 */
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;

		/*
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */
		if (ndp.ni_vp->v_rdev != ump->um_odevvp->v_rdev)
			error = EINVAL; /* needs translation */
		vput(ndp.ni_vp);
		if (error)
			return (error);
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that the filesystem
			 * is read-only now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					   "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					   "R/W mount of %s denied. %s.%s",
					   fs->fs_fsmnt,
					   "Filesystem is not clean - run fsck",
					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
					   " Forced mount will invalidate"
					   " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}

	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}
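
/*
 * ffs_cmount() only translates the old binary struct ufs_args into the
 * "from" and "export" name/value options consumed by ffs_mount() above;
 * kernel_mount() then re-enters the VFS through the regular nmount(2)
 * path.
 */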

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 6: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 7: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_FORCE)) != 0)
		loc = STDSB_NOHASHFAIL;
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			if ((mp->mnt_flag & MNT_RDONLY) == 0)
				printf("WARNING: %s: GJOURNAL flag on fs "
				    "but no gjournal provider below\n",
				    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	sx_init(&ump->um_checkpath_lock, "uchpth");
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump != NULL) {
		mtx_destroy(UFS_MTX(ump));
		sx_destroy(&ump->um_checkpath_lock);
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}
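
/*
 * ffs_use_bread() is the readfunc callback handed to ffs_sbget() in
 * ffs_mountfs() above; ffs_use_bwrite(), forward-declared at the top of
 * this file, is its counterpart for superblock writes.
 */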

static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump,
    ufs2_daddr_t sblockloc)
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	sx_destroy(&ump->um_checkpath_lock);
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	/* devvp is not locked there */
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	int allerror, error;

	allerror = 0;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without a subsequent UFS_UPDATE().
		 * Also test all the other timestamp flags, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
#ifdef QUOTA
		qsyncvp(vp);
#endif
		for (;;) {
			error = ffs_syncvnode(vp, waitfor, 0);
			if (error == ERELOOKUP)
				continue;
			if (error != 0)
				allerror = error;
			break;
		}
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (ffs_fsfail_cleanup(ump, allerror))
			allerror = 0;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
					  devvp,
					  softdep_deps,
					  softdep_accdeps,
					  secondary_writes,
					  secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	if (ffs_fsfail_cleanup(ump, allerror))
		allerror = 0;
	return (allerror);
}
1818 
1819 int
1820 ffs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
1825 {
1826 	return (ffs_vgetf(mp, ino, flags, vpp, 0));
1827 }
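
/*
 * Usage sketch (editorial): a typical VFS_VGET() consumer resolves an
 * inode number to a vnode locked as requested and drops both the lock
 * and the reference with vput() when done, e.g.:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = ffs_vget(mp, ino, LK_EXCLUSIVE, &vp);
 *	if (error == 0) {
 *		... operate on VTOI(vp) ...
 *		vput(vp);
 *	}
 */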
1828 
1829 int
1830 ffs_vgetf(struct mount *mp, ino_t ino, int flags, struct vnode **vpp,
    int ffs_flags)
1836 {
1837 	struct fs *fs;
1838 	struct inode *ip;
1839 	struct ufsmount *ump;
1840 	struct buf *bp;
1841 	struct vnode *vp;
1842 	daddr_t dbn;
1843 	int error;
1844 
1845 	MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1846 	    (flags & LK_EXCLUSIVE) != 0);
1847 
1848 	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1849 	if (error != 0)
1850 		return (error);
1851 	if (*vpp != NULL) {
1852 		if ((ffs_flags & FFSV_REPLACE) == 0 &&
1853 		    ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1854 		    !VN_IS_DOOMED(*vpp)))
1855 			return (0);
1856 		vgone(*vpp);
1857 		vput(*vpp);
1858 	}
1859 
1860 	/*
1861 	 * We must promote to an exclusive lock for vnode creation.  A
1862 	 * shared lock request can happen if lookup is passed LOCKSHARED.
1863 	 */
1864 	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1865 		flags &= ~LK_TYPE_MASK;
1866 		flags |= LK_EXCLUSIVE;
1867 	}
1868 
1869 	/*
1870 	 * We do not lock vnode creation, as it is believed to be too
1871 	 * expensive for a case as rare as two processes simultaneously
1872 	 * creating a vnode for the same ino.  We just let them race and
1873 	 * check later to decide who wins.  Let the race begin!
1874 	 */
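	/*
	 * (Editorial note: the race is resolved by vfs_hash_insert()
	 * below, which detects that another thread won, discards the
	 * loser's freshly created vnode, and returns the winner's
	 * vnode in *vpp.)
	 */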
1875 
1876 	ump = VFSTOUFS(mp);
1877 	fs = ump->um_fs;
1878 	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);
1879 
1880 	/* Allocate a new vnode/inode. */
1881 	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
1882 	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
1883 	if (error) {
1884 		*vpp = NULL;
1885 		uma_zfree_smr(uma_inode, ip);
1886 		return (error);
1887 	}
1888 	/*
1889 	 * FFS supports recursive locking.
1890 	 */
1891 	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1892 	VN_LOCK_AREC(vp);
1893 	vp->v_data = ip;
1894 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
1895 	ip->i_vnode = vp;
1896 	ip->i_ump = ump;
1897 	ip->i_number = ino;
1898 	ip->i_ea_refs = 0;
1899 	ip->i_nextclustercg = -1;
1900 	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
1901 	ip->i_mode = 0; /* ensure error cases below throw away vnode */
1902 	cluster_init_vn(&ip->i_clusterw);
1903 #ifdef DIAGNOSTIC
1904 	ufs_init_trackers(ip);
1905 #endif
1906 #ifdef QUOTA
1907 	{
1908 		int i;
1909 		for (i = 0; i < MAXQUOTAS; i++)
1910 			ip->i_dquot[i] = NODQUOT;
1911 	}
1912 #endif
1913 
1914 	if (ffs_flags & FFSV_FORCEINSMQ)
1915 		vp->v_vflag |= VV_FORCEINSMQ;
1916 	error = insmntque(vp, mp);
1917 	if (error != 0) {
1918 		uma_zfree_smr(uma_inode, ip);
1919 		*vpp = NULL;
1920 		return (error);
1921 	}
1922 	vp->v_vflag &= ~VV_FORCEINSMQ;
1923 	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
1924 	if (error != 0)
1925 		return (error);
1926 	if (*vpp != NULL) {
1927 		/*
1928 		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
1929 		 * operate on an empty inode, which must not be found
1930 		 * by other threads until it is fully filled in.  The
1931 		 * empty inode's vnode must not be re-inserted into the
1932 		 * hash by another thread after we removed it above.
1933 		 */
1934 		MPASS((ffs_flags & FFSV_REPLACE) == 0);
1935 		return (0);
1936 	}
1937 
1938 	/* Read in the disk contents for the inode, copy into the inode. */
1939 	dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
1940 	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
1941 	    NULL, NULL, 0, NOCRED, 0, NULL, &bp);
1942 	if (error != 0) {
1943 		/*
1944 		 * The inode does not contain anything useful, so it would
1945 		 * be misleading to leave it on its hash chain. With mode
1946 		 * still zero, it will be unlinked and returned to the free
1947 		 * list by vput().
1948 		 */
1949 		vgone(vp);
1950 		vput(vp);
1951 		*vpp = NULL;
1952 		return (error);
1953 	}
1954 	if (I_IS_UFS1(ip))
1955 		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
1956 	else
1957 		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
1958 	if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
1959 		bqrelse(bp);
1960 		vgone(vp);
1961 		vput(vp);
1962 		*vpp = NULL;
1963 		return (error);
1964 	}
1965 	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
1966 	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
1967 		softdep_load_inodeblock(ip);
1968 	else
1969 		ip->i_effnlink = ip->i_nlink;
1970 	bqrelse(bp);
1971 
1972 	/*
1973 	 * Initialize the vnode from the inode, check for aliases.
1974 	 * Note that the underlying vnode may have changed.
1975 	 */
1976 	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
1977 	    &vp);
1978 	if (error) {
1979 		vgone(vp);
1980 		vput(vp);
1981 		*vpp = NULL;
1982 		return (error);
1983 	}
1984 
1985 	/*
1986 	 * Finish inode initialization.
1987 	 */
1988 	if (vp->v_type != VFIFO) {
1989 		/* FFS supports shared locking for all files except fifos. */
1990 		VN_LOCK_ASHARE(vp);
1991 	}
1992 
1993 	/*
1994 	 * Set up a generation number for this inode if it does not already
1995 	 * have one; this should only happen on old filesystems.  Zero means
1996 	 * "unset", so we retry until arc4random() yields a nonzero value.
	 */
1997 	if (ip->i_gen == 0) {
1998 		while (ip->i_gen == 0)
1999 			ip->i_gen = arc4random();
2000 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
2001 			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
2002 			DIP_SET(ip, i_gen, ip->i_gen);
2003 		}
2004 	}
2005 #ifdef MAC
2006 	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
2007 		/*
2008 		 * If this vnode is already allocated, and we're running
2009 		 * multi-label, attempt to perform a label association
2010 		 * from the extended attributes on the inode.
2011 		 */
2012 		error = mac_vnode_associate_extattr(mp, vp);
2013 		if (error) {
2014 			/* ufs_inactive will release ip->i_devvp ref. */
2015 			vgone(vp);
2016 			vput(vp);
2017 			*vpp = NULL;
2018 			return (error);
2019 		}
2020 	}
2021 #endif
2022 
2023 	*vpp = vp;
2024 	return (0);
2025 }
2026 
2027 /*
2028  * File handle to vnode
2029  *
2030  * We have to be really careful about stale file handles:
2031  * - check that the inode number is valid
2032  * - for UFS2, check that the inode number has been initialized
2033  * - call ffs_vget() to get the locked inode
2034  * - check for an unallocated inode (i_mode == 0)
2035  * - check that the given client host has export rights and return
2036  *   those rights via exflagsp and credanonp
2037  */
2038 static int
2039 ffs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
2044 {
2045 	struct ufid *ufhp;
2046 
2047 	ufhp = (struct ufid *)fhp;
2048 	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
2049 	    vpp, 0));
2050 }
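
/*
 * Usage sketch (editorial): the NFS server reaches this routine via
 * VFS_FHTOVP() with an opaque handle that it earlier obtained from
 * VOP_VPTOFH(), roughly:
 *
 *	error = VFS_FHTOVP(mp, &fhp->fh_fid, LK_EXCLUSIVE, &vp);
 *
 * where fhp is a fhandle_t pointer.  A stale or forged handle yields
 * ESTALE rather than a panic.
 */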
2051 
2052 int
2053 ffs_inotovp(struct mount *mp, ino_t ino, u_int64_t gen, int lflags,
    struct vnode **vpp, int ffs_flags)
2060 {
2061 	struct ufsmount *ump;
2062 	struct vnode *nvp;
2063 	struct inode *ip;
2064 	struct fs *fs;
2065 	struct cg *cgp;
2066 	struct buf *bp;
2067 	u_int cg;
2068 	int error;
2069 
2070 	ump = VFSTOUFS(mp);
2071 	fs = ump->um_fs;
2072 	*vpp = NULL;
2073 
2074 	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
2075 		return (ESTALE);
2076 
2077 	/*
2078 	 * We must check that the inode is initialized, because UFS2 does
2079 	 * lazy initialization and nfs_fhtovp can offer arbitrary inode numbers.
2080 	 */
2081 	if (fs->fs_magic == FS_UFS2_MAGIC) {
2082 		cg = ino_to_cg(fs, ino);
2083 		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
2084 		if (error != 0)
2085 			return (error);
2086 		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
2087 			brelse(bp);
2088 			return (ESTALE);
2089 		}
2090 		brelse(bp);
2091 	}
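
	/*
	 * (Editorial example: with fs_ipg == 32768 inodes per cylinder
	 * group, inode 100000 falls in cg 3, since ino_to_cg() divides
	 * by fs_ipg; it passes the check above only when
	 * 100000 - 3 * 32768 == 1696 is below that group's
	 * cg_initediblk.)
	 */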
2092 
2093 	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
2094 	if (error != 0)
2095 		return (error);
2096 
2097 	ip = VTOI(nvp);
2098 	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
2099 		if (ip->i_mode == 0)
2100 			vgone(nvp);
2101 		vput(nvp);
2102 		return (ESTALE);
2103 	}
2104 
2105 	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
2106 	*vpp = nvp;
2107 	return (0);
2108 }
2109 
2110 /*
2111  * Initialize the filesystem.
2112  */
2113 static int
2114 ffs_init(struct vfsconf *vfsp)
2116 {
2117 
2118 	ffs_susp_initialize();
2119 	softdep_initialize();
2120 	return (ufs_init(vfsp));
2121 }
2122 
2123 /*
2124  * Undo the work of ffs_init().
2125  */
2126 static int
2127 ffs_uninit(struct vfsconf *vfsp)
2129 {
2130 	int ret;
2131 
2132 	ret = ufs_uninit(vfsp);
2133 	softdep_uninitialize();
2134 	ffs_susp_uninitialize();
2135 	taskqueue_drain_all(taskqueue_thread);
2136 	return (ret);
2137 }
2138 
2139 /*
2140  * Structure used to pass information from ffs_sbupdate to its
2141  * helper routine ffs_use_bwrite.
2142  */
2143 struct devfd {
2144 	struct ufsmount	*ump;
2145 	struct buf	*sbbp;
2146 	int		 waitfor;
2147 	int		 suspended;
2148 	int		 error;
2149 };
2150 
2151 /*
2152  * Write a superblock and associated information back to disk.
2153  */
2154 int
2155 ffs_sbupdate(struct ufsmount *ump, int waitfor, int suspended)
2159 {
2160 	struct fs *fs;
2161 	struct buf *sbbp;
2162 	struct devfd devfd;
2163 
2164 	fs = ump->um_fs;
2165 	if (fs->fs_ronly == 1 &&
2166 	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
2167 	    (MNT_RDONLY | MNT_UPDATE))
2168 		panic("ffs_sbupdate: write read-only filesystem");
2169 	/*
2170 	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
2171 	 */
2172 	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
2173 	    (int)fs->fs_sbsize, 0, 0, 0);
2174 	/*
2175 	 * Initialize info needed for write function.
2176 	 */
2177 	devfd.ump = ump;
2178 	devfd.sbbp = sbbp;
2179 	devfd.waitfor = waitfor;
2180 	devfd.suspended = suspended;
2181 	devfd.error = 0;
2182 	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
2183 }
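
/*
 * Editorial note: ffs_sbput() invokes the supplied write function once
 * for each block of cylinder group summary information and finally for
 * the superblock itself; ffs_use_bwrite() below tells the two cases
 * apart by comparing loc against fs->fs_sblockloc.
 */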
2184 
2185 /*
2186  * Write function for use by filesystem-layer routines.
2187  */
2188 static int
2189 ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
2190 {
2191 	struct devfd *devfdp;
2192 	struct ufsmount *ump;
2193 	struct buf *bp;
2194 	struct fs *fs;
2195 	int error;
2196 
2197 	devfdp = devfd;
2198 	ump = devfdp->ump;
2199 	fs = ump->um_fs;
2200 	/*
2201 	 * Writing the superblock summary information.
2202 	 */
2203 	if (loc != fs->fs_sblockloc) {
2204 		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
2205 		bcopy(buf, bp->b_data, (u_int)size);
2206 		if (devfdp->suspended)
2207 			bp->b_flags |= B_VALIDSUSPWRT;
2208 		if (devfdp->waitfor != MNT_WAIT)
2209 			bawrite(bp);
2210 		else if ((error = bwrite(bp)) != 0)
2211 			devfdp->error = error;
2212 		return (0);
2213 	}
2214 	/*
2215 	 * Writing the superblock itself. We need to do special checks for it.
2216 	 */
2217 	bp = devfdp->sbbp;
2218 	if (ffs_fsfail_cleanup(ump, devfdp->error))
2219 		devfdp->error = 0;
2220 	if (devfdp->error != 0) {
2221 		brelse(bp);
2222 		return (devfdp->error);
2223 	}
2224 	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
2225 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2226 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2227 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
2228 		fs->fs_sblockloc = SBLOCK_UFS1;
2229 	}
2230 	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
2231 	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
2232 		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
2233 		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
2234 		fs->fs_sblockloc = SBLOCK_UFS2;
2235 	}
2236 	if (MOUNTEDSOFTDEP(ump->um_mountp))
2237 		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
2238 	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
2239 	fs = (struct fs *)bp->b_data;
2240 	ffs_oldfscompat_write(fs, ump);
2241 	fs->fs_si = NULL;
2242 	/* Recalculate the superblock hash */
2243 	fs->fs_ckhash = ffs_calc_sbhash(fs);
2244 	if (devfdp->suspended)
2245 		bp->b_flags |= B_VALIDSUSPWRT;
2246 	if (devfdp->waitfor != MNT_WAIT)
2247 		bawrite(bp);
2248 	else if ((error = bwrite(bp)) != 0)
2249 		devfdp->error = error;
2250 	return (devfdp->error);
2251 }
2252 
2253 static int
2254 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
2255 	int attrnamespace, const char *attrname)
2256 {
2257 
2258 #ifdef UFS_EXTATTR
2259 	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
2260 	    attrname));
2261 #else
2262 	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
2263 	    attrname));
2264 #endif
2265 }
2266 
2267 static void
2268 ffs_ifree(struct ufsmount *ump, struct inode *ip)
2269 {
2270 
2271 	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
2272 		uma_zfree(uma_ufs1, ip->i_din1);
2273 	else if (ip->i_din2 != NULL)
2274 		uma_zfree(uma_ufs2, ip->i_din2);
2275 	uma_zfree_smr(uma_inode, ip);
2276 }
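
/*
 * Editorial note: the inode zone is SMR-managed (note the
 * uma_zalloc_smr()/uma_zfree_smr() pairing), so a freed inode is not
 * reused until concurrent lockless lookup code that may still be
 * examining it is guaranteed to have finished; the UFS1/UFS2 dinode
 * zones need no such protection.
 */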
2277 
2278 static int dobkgrdwrite = 1;
2279 SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
2280     "Do background writes (honoring the BV_BKGRDWRITE flag)?");
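
/*
 * Editorial note: the knob above is exported as the sysctl
 * debug.dobkgrdwrite; e.g. "sysctl debug.dobkgrdwrite=0" disables the
 * background-write optimization implemented in ffs_bufwrite() below.
 */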
2281 
2282 /*
2283  * Complete a background write started from bwrite.
2284  */
2285 static void
2286 ffs_backgroundwritedone(struct buf *bp)
2287 {
2288 	struct bufobj *bufobj;
2289 	struct buf *origbp;
2290 
2291 #ifdef SOFTUPDATES
2292 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
2293 		softdep_handle_error(bp);
2294 #endif
2295 
2296 	/*
2297 	 * Find the original buffer that we are writing.
2298 	 */
2299 	bufobj = bp->b_bufobj;
2300 	BO_LOCK(bufobj);
2301 	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
2302 		panic("backgroundwritedone: lost buffer");
2303 
2304 	/*
2305 	 * If the write failed, mark the original cylinder group buffer
2306 	 * origbp with BV_BKGRDERR so that the failed write is not lost.
2307 	 */
2308 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2309 		origbp->b_vflags |= BV_BKGRDERR;
2310 	BO_UNLOCK(bufobj);
2311 	/*
2312 	 * Process dependencies, then move any unfinished ones back to origbp.
2313 	 */
2314 	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
2315 		buf_complete(bp);
2316 #ifdef SOFTUPDATES
2317 	if (!LIST_EMPTY(&bp->b_dep))
2318 		softdep_move_dependencies(bp, origbp);
2319 #endif
2320 	/*
2321 	 * This buffer is marked B_NOCACHE so that when it is released
2322 	 * by bufdone() it will be tossed.  Clear B_IOSTARTED in case of error.
2323 	 */
2324 	bp->b_flags |= B_NOCACHE;
2325 	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
2326 	pbrelvp(bp);
2327 
2328 	/*
2329 	 * Prevent brelse() from trying to keep and re-dirtying bp on
2330 	 * errors. It causes b_bufobj dereference in
2331 	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
2332 	 * pbrelvp() above.
2333 	 */
2334 	if ((bp->b_ioflags & BIO_ERROR) != 0)
2335 		bp->b_flags |= B_INVAL;
2336 	bufdone(bp);
2337 	BO_LOCK(bufobj);
2338 	/*
2339 	 * Clear the BV_BKGRDINPROG flag in the original buffer
2340 	 * and awaken it if it is waiting for the write to complete.
2341 	 * If BV_BKGRDINPROG is not set in the original buffer it must
2342 	 * have been released and re-instantiated - which is not legal.
2343 	 */
2344 	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
2345 	    ("backgroundwritedone: lost buffer2"));
2346 	origbp->b_vflags &= ~BV_BKGRDINPROG;
2347 	if (origbp->b_vflags & BV_BKGRDWAIT) {
2348 		origbp->b_vflags &= ~BV_BKGRDWAIT;
2349 		wakeup(&origbp->b_xflags);
2350 	}
2351 	BO_UNLOCK(bufobj);
2352 }
2353 
2354 /*
2355  * Write, and release the buffer on completion (done by iodone
2356  * if the write is asynchronous).  Do not bother writing anything
2357  * if the buffer is invalid.
2358  *
2359  * Note that we set B_CACHE here, indicating that the buffer is
2360  * fully valid and thus cacheable.  This is true even of NFS
2361  * now, so we set it generally.  It could be set either here
2362  * or in biodone() since the I/O is synchronous.  We put it
2363  * here.
2364  */
2365 static int
2366 ffs_bufwrite(struct buf *bp)
2367 {
2368 	struct buf *newbp;
2369 	struct cg *cgp;
2370 
2371 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2372 	if (bp->b_flags & B_INVAL) {
2373 		brelse(bp);
2374 		return (0);
2375 	}
2376 
2377 	if (!BUF_ISLOCKED(bp))
2378 		panic("bufwrite: buffer is not busy???");
2379 	/*
2380 	 * If a background write is already in progress, delay
2381 	 * writing this block if it is asynchronous. Otherwise
2382 	 * wait for the background write to complete.
2383 	 */
2384 	BO_LOCK(bp->b_bufobj);
2385 	if (bp->b_vflags & BV_BKGRDINPROG) {
2386 		if (bp->b_flags & B_ASYNC) {
2387 			BO_UNLOCK(bp->b_bufobj);
2388 			bdwrite(bp);
2389 			return (0);
2390 		}
2391 		bp->b_vflags |= BV_BKGRDWAIT;
2392 		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2393 		    "bwrbg", 0);
2394 		if (bp->b_vflags & BV_BKGRDINPROG)
2395 			panic("bufwrite: still writing");
2396 	}
2397 	bp->b_vflags &= ~BV_BKGRDERR;
2398 	BO_UNLOCK(bp->b_bufobj);
2399 
2400 	/*
2401 	 * If this buffer is marked for background writing and we
2402 	 * do not have to wait for it, make a copy and write the
2403 	 * copy so as to leave this buffer ready for further use.
2404 	 *
2405 	 * This optimization eats a lot of memory.  If we have a page
2406 	 * or buffer shortfall we can't do it.
2407 	 */
2408 	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2409 	    (bp->b_flags & B_ASYNC) &&
2410 	    !vm_page_count_severe() &&
2411 	    !buf_dirty_count_severe()) {
2412 		KASSERT(bp->b_iodone == NULL,
2413 		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2414 
2415 		/* get a new block */
2416 		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2417 		if (newbp == NULL)
2418 			goto normal_write;
2419 
2420 		KASSERT(buf_mapped(bp), ("Unmapped cg"));
2421 		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2422 		BO_LOCK(bp->b_bufobj);
2423 		bp->b_vflags |= BV_BKGRDINPROG;
2424 		BO_UNLOCK(bp->b_bufobj);
2425 		newbp->b_xflags |=
2426 		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
2427 		newbp->b_lblkno = bp->b_lblkno;
2428 		newbp->b_blkno = bp->b_blkno;
2429 		newbp->b_offset = bp->b_offset;
2430 		newbp->b_iodone = ffs_backgroundwritedone;
2431 		newbp->b_flags |= B_ASYNC;
2432 		newbp->b_flags &= ~B_INVAL;
2433 		pbgetvp(bp->b_vp, newbp);
2434 
2435 #ifdef SOFTUPDATES
2436 		/*
2437 		 * Move over the dependencies.  If there are rollbacks,
2438 		 * leave the parent buffer dirtied as it will need to
2439 		 * be written again.
2440 		 */
2441 		if (LIST_EMPTY(&bp->b_dep) ||
2442 		    softdep_move_dependencies(bp, newbp) == 0)
2443 			bundirty(bp);
2444 #else
2445 		bundirty(bp);
2446 #endif
2447 
2448 		/*
2449 		 * Initiate write on the copy, release the original.  The
2450 		 * BKGRDINPROG flag prevents it from going away until
2451 		 * the background write completes. We have to recalculate
2452 		 * its check hash in case the buffer gets freed and then
2453 		 * reconstituted from the buffer cache during a later read.
2454 		 */
2455 		if ((bp->b_xflags & BX_CYLGRP) != 0) {
2456 			cgp = (struct cg *)bp->b_data;
2457 			cgp->cg_ckhash = 0;
2458 			cgp->cg_ckhash =
2459 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2460 		}
2461 		bqrelse(bp);
2462 		bp = newbp;
2463 	} else
2464 		/* Mark the buffer clean */
2465 		bundirty(bp);
2466 
2467 	/* Let the normal bufwrite do the rest for us */
2468 normal_write:
2469 	/*
2470 	 * If we are writing a cylinder group, update its time.
2471 	 */
2472 	if ((bp->b_xflags & BX_CYLGRP) != 0) {
2473 		cgp = (struct cg *)bp->b_data;
2474 		cgp->cg_old_time = cgp->cg_time = time_second;
2475 	}
2476 	return (bufwrite(bp));
2477 }
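
/*
 * Editorial sketch of the background-write path implemented by
 * ffs_bufwrite() and completed by ffs_backgroundwritedone() above,
 * assuming an async buffer marked BX_BKGRDWRITE and no memory
 * shortage:
 *
 *	ffs_bufwrite(bp)
 *		newbp = geteblk(...);  copy bp's contents into newbp
 *		bp->b_vflags |= BV_BKGRDINPROG
 *		bundirty(bp); bqrelse(bp)	(bp is clean and reusable)
 *		bufwrite(newbp)			(I/O proceeds on the copy)
 *	ffs_backgroundwritedone(newbp)
 *		on error, mark bp BV_BKGRDERR so the write is not lost
 *		move unfinished dependencies from newbp back to bp
 *		clear BV_BKGRDINPROG, wake any waiter in ffs_bufwrite()
 */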
2478 
2479 static void
2480 ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2481 {
2482 	struct vnode *vp;
2483 	struct buf *tbp;
2484 	int error, nocopy;
2485 
2486 	/*
2487 	 * This is the bufobj strategy for the private VCHR vnodes
2488 	 * used by FFS to access the underlying storage device.
2489 	 * We override the default bufobj strategy and thus bypass
2490 	 * VOP_STRATEGY() for these vnodes.
2491 	 */
2492 	vp = bo2vnode(bo);
2493 	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
2494 	    bp->b_vp->v_rdev == NULL ||
2495 	    bp->b_vp->v_rdev->si_mountpt == NULL ||
2496 	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
2497 	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
2498 	    ("ffs_geom_strategy() with wrong vp"));
2499 	if (bp->b_iocmd == BIO_WRITE) {
2500 		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2501 		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2502 		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2503 			panic("ffs_geom_strategy: bad I/O");
2504 		nocopy = bp->b_flags & B_NOCOPY;
2505 		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2506 		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2507 		    vp->v_rdev->si_snapdata != NULL) {
2508 			if ((bp->b_flags & B_CLUSTER) != 0) {
2509 				runningbufwakeup(bp);
2510 				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2511 					      b_cluster.cluster_entry) {
2512 					error = ffs_copyonwrite(vp, tbp);
2513 					if (error != 0 &&
2514 					    error != EOPNOTSUPP) {
2515 						bp->b_error = error;
2516 						bp->b_ioflags |= BIO_ERROR;
2517 						bp->b_flags &= ~B_BARRIER;
2518 						bufdone(bp);
2519 						return;
2520 					}
2521 				}
2522 				bp->b_runningbufspace = bp->b_bufsize;
2523 				atomic_add_long(&runningbufspace,
2524 					       bp->b_runningbufspace);
2525 			} else {
2526 				error = ffs_copyonwrite(vp, bp);
2527 				if (error != 0 && error != EOPNOTSUPP) {
2528 					bp->b_error = error;
2529 					bp->b_ioflags |= BIO_ERROR;
2530 					bp->b_flags &= ~B_BARRIER;
2531 					bufdone(bp);
2532 					return;
2533 				}
2534 			}
2535 		}
2536 #ifdef SOFTUPDATES
2537 		if ((bp->b_flags & B_CLUSTER) != 0) {
2538 			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2539 				      b_cluster.cluster_entry) {
2540 				if (!LIST_EMPTY(&tbp->b_dep))
2541 					buf_start(tbp);
2542 			}
2543 		} else {
2544 			if (!LIST_EMPTY(&bp->b_dep))
2545 				buf_start(bp);
2546 		}
2547 
2548 #endif
2549 		/*
2550 		 * Check for metadata that needs check-hashes and update them.
2551 		 */
2552 		switch (bp->b_xflags & BX_FSPRIV) {
2553 		case BX_CYLGRP:
2554 			((struct cg *)bp->b_data)->cg_ckhash = 0;
2555 			((struct cg *)bp->b_data)->cg_ckhash =
2556 			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
2557 			break;
2558 
2559 		case BX_SUPERBLOCK:
2560 		case BX_INODE:
2561 		case BX_INDIR:
2562 		case BX_DIR:
2563 			printf("Check-hash write is unimplemented!!!\n");
2564 			break;
2565 
2566 		case 0:
2567 			break;
2568 
2569 		default:
2570 			printf("multiple buffer types 0x%b\n",
2571 			    (u_int)(bp->b_xflags & BX_FSPRIV),
2572 			    PRINT_UFS_BUF_XFLAGS);
2573 			break;
2574 		}
2575 	}
2576 	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
2577 		bp->b_xflags |= BX_CVTENXIO;
2578 	g_vfs_strategy(bo, bp);
2579 }
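
/*
 * Editorial note: recomputing cg_ckhash in the BX_CYLGRP case above,
 * immediately before the buffer is handed to g_vfs_strategy(), ensures
 * that the stored check hash always matches the bytes that reach the
 * disk, however the buffer was modified while cached.
 */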
2580 
2581 int
2582 ffs_own_mount(const struct mount *mp)
2583 {
2584 
2585 	if (mp->mnt_op == &ufs_vfsops)
2586 		return (1);
2587 	return (0);
2588 }
2589 
2590 #ifdef	DDB
2591 #ifdef SOFTUPDATES
2592 
2593 /* defined in ffs_softdep.c */
2594 extern void db_print_ffs(struct ufsmount *ump);
2595 
2596 DB_SHOW_COMMAND(ffs, db_show_ffs)
2597 {
2598 	struct mount *mp;
2599 	struct ufsmount *ump;
2600 
2601 	if (have_addr) {
2602 		ump = VFSTOUFS((struct mount *)addr);
2603 		db_print_ffs(ump);
2604 		return;
2605 	}
2606 
2607 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2608 		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2609 			db_print_ffs(VFSTOUFS(mp));
2610 	}
2611 }
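
/*
 * Usage sketch (editorial): from DDB, "show ffs" prints every mounted
 * UFS filesystem, while "show ffs <addr>" prints only the mount at the
 * given struct mount address.
 */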
2612 
2613 #endif	/* SOFTUPDATES */
2614 #endif	/* DDB */
2615