Lines matching refs:mp (references to the struct mount pointer mp); the number leading each match is its line in FreeBSD's sys/kern/vfs_mount.c.

135 static void mount_devctl_event(const char *type, struct mount *mp, bool donew);
154 struct mount *mp;
156 mp = (struct mount *)mem;
157 mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
158 mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
159 lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
160 mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO);
161 mp->mnt_ref = 0;
162 mp->mnt_vfs_ops = 1;
163 mp->mnt_rootvnode = NULL;
170 struct mount *mp;
172 mp = (struct mount *)mem;
173 uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu);
174 lockdestroy(&mp->mnt_explock);
175 mtx_destroy(&mp->mnt_listmtx);
176 mtx_destroy(&mp->mnt_mtx);
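The matches above are mount_init() and mount_fini(), the item init/fini hooks of the mount UMA zone: the two mutexes, the export lockmgr lock, and the per-CPU counter block are set up once per zone item and survive individual alloc/free cycles, which is why vfs_mount_alloc() below asserts rather than initializes them. A sketch of the zone creation that ties the pair together (this line is an assumption, not among the matches; the NOFREE flag in particular is inferred from the lock lifetimes):

        mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
            NULL, mount_init, mount_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);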
495 struct mount *mp;
498 mp = atomic_load_ptr(&vp->v_mount);
499 if (__predict_false(mp == NULL)) {
500 return (mp);
502 if (vfs_op_thread_enter(mp, mpcpu)) {
503 if (__predict_true(mp == vp->v_mount)) {
505 vfs_op_thread_exit(mp, mpcpu);
507 vfs_op_thread_exit(mp, mpcpu);
508 mp = NULL;
511 MNT_ILOCK(mp);
512 if (mp == vp->v_mount) {
513 MNT_REF(mp);
514 MNT_IUNLOCK(mp);
516 MNT_IUNLOCK(mp);
517 mp = NULL;
520 return (mp);
524 vfs_ref(struct mount *mp)
528 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
529 if (vfs_op_thread_enter(mp, mpcpu)) {
531 vfs_op_thread_exit(mp, mpcpu);
535 MNT_ILOCK(mp);
536 MNT_REF(mp);
537 MNT_IUNLOCK(mp);
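Lines 528-537 are the skeleton of vfs_ref(); the lines the search omits (they mention mpcpu, not mp) are, on a reasonable reading, the per-CPU increment and early return. Reconstructed under that assumption, with vfs_rel() at lines 642-655 being the mirror image that subtracts instead:

        void
        vfs_ref(struct mount *mp)
        {
                struct mount_pcpu *mpcpu;

                CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
                if (vfs_op_thread_enter(mp, mpcpu)) {
                        /* fast path: unlocked per-CPU delta, no atomics */
                        vfs_mp_count_add_pcpu(mpcpu, ref, 1);
                        vfs_op_thread_exit(mp, mpcpu);
                        return;
                }
                /* slow path: counters centralized while mnt_vfs_ops != 0 */
                MNT_ILOCK(mp);
                MNT_REF(mp);
                MNT_IUNLOCK(mp);
        }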
544 * caller and stored in per-mount data associated with mp.
556 struct mount *mp;
558 mp = atomic_load_ptr(&vp->v_mount);
559 if (mp == NULL)
561 MNT_ILOCK(mp);
562 if (mp != vp->v_mount ||
563 ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) {
564 MNT_IUNLOCK(mp);
567 KASSERT(ump != mp, ("upper and lower mounts are identical"));
568 upper->mp = ump;
569 MNT_REF(mp);
570 TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link);
571 MNT_IUNLOCK(mp);
572 return (mp);
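Lines 556-572 are vfs_register_upper_from_vp(): an optimistic unlocked load of vp->v_mount, re-validated under MNT_ILOCK together with the MNTK_UNMOUNT and MNTK_RECURSE checks, so registration cannot race a concurrent unmount. A hypothetical stacked-filesystem caller (the names xmp and upper_node are illustrative, not from the matches); the teardown half is vfs_unregister_upper() at lines 625-638, which must run before the lower mount can be unmounted:

        lowermp = vfs_register_upper_from_vp(lowerrootvp, mp, &xmp->upper_node);
        if (lowermp == NULL)
                return (EBUSY);         /* lower mount is going away */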
577 * notifications from lower mount mp. This registration will
580 * associated with mp.
582 * ump must already be registered as an upper mount of mp
586 vfs_register_for_notification(struct mount *mp, struct mount *ump,
589 upper->mp = ump;
590 MNT_ILOCK(mp);
591 TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link);
592 MNT_IUNLOCK(mp);
596 vfs_drain_upper_locked(struct mount *mp)
598 mtx_assert(MNT_MTX(mp), MA_OWNED);
599 while (mp->mnt_upper_pending != 0) {
600 mp->mnt_kern_flag |= MNTK_UPPER_WAITER;
601 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0);
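vfs_drain_upper_locked() is the waiter half of a classic sleep/wakeup handshake; the waker half shows up in the dounmount() matches near line 2218 below. Both halves side by side, condensed from those lines (predicate and flag are only manipulated under MNT_MTX, so the wakeup cannot be lost):

        /* waiter, mnt_mtx held */
        while (mp->mnt_upper_pending != 0) {
                mp->mnt_kern_flag |= MNTK_UPPER_WAITER;
                msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0);
        }

        /* waker, mnt_mtx held */
        mp->mnt_upper_pending--;
        if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
            mp->mnt_upper_pending == 0) {
                mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
                wakeup(&mp->mnt_uppers);
        }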
608 * as an upper mount for mp.
611 vfs_unregister_for_notification(struct mount *mp,
614 MNT_ILOCK(mp);
615 vfs_drain_upper_locked(mp);
616 TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link);
617 MNT_IUNLOCK(mp);
622 * This must be done before mp can be unmounted.
625 vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper)
627 MNT_ILOCK(mp);
628 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
630 vfs_drain_upper_locked(mp);
631 TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link);
632 if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 &&
633 TAILQ_EMPTY(&mp->mnt_uppers)) {
634 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER;
635 wakeup(&mp->mnt_taskqueue_link);
637 MNT_REL(mp);
638 MNT_IUNLOCK(mp);
642 vfs_rel(struct mount *mp)
646 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
647 if (vfs_op_thread_enter(mp, mpcpu)) {
649 vfs_op_thread_exit(mp, mpcpu);
653 MNT_ILOCK(mp);
654 MNT_REL(mp);
655 MNT_IUNLOCK(mp);
665 struct mount *mp;
667 mp = uma_zalloc(mount_zone, M_WAITOK);
668 bzero(&mp->mnt_startzero,
670 mp->mnt_kern_flag = 0;
671 mp->mnt_flag = 0;
672 mp->mnt_rootvnode = NULL;
673 mp->mnt_vnodecovered = NULL;
674 mp->mnt_op = NULL;
675 mp->mnt_vfc = NULL;
676 TAILQ_INIT(&mp->mnt_nvnodelist);
677 mp->mnt_nvnodelistsize = 0;
678 TAILQ_INIT(&mp->mnt_lazyvnodelist);
679 mp->mnt_lazyvnodelistsize = 0;
680 MPPASS(mp->mnt_ref == 0 && mp->mnt_lockref == 0 &&
681 mp->mnt_writeopcount == 0, mp);
682 MPASSERT(mp->mnt_vfs_ops == 1, mp,
683 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops));
684 (void) vfs_busy(mp, MBF_NOWAIT);
686 mp->mnt_op = vfsp->vfc_vfsops;
687 mp->mnt_vfc = vfsp;
688 mp->mnt_stat.f_type = vfsp->vfc_typenum;
689 mp->mnt_gen++;
690 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
691 mp->mnt_vnodecovered = vp;
692 mp->mnt_cred = crdup(cred);
693 mp->mnt_stat.f_owner = cred->cr_uid;
694 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
695 mp->mnt_iosize_max = DFLTPHYS;
697 mac_mount_init(mp);
698 mac_mount_create(cred, mp);
700 arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
701 mp->mnt_upper_pending = 0;
702 TAILQ_INIT(&mp->mnt_uppers);
703 TAILQ_INIT(&mp->mnt_notify);
704 mp->mnt_taskqueue_flags = 0;
705 mp->mnt_unmount_retries = 0;
706 return (mp);
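vfs_mount_alloc() hands back a mount that is already busied (the MBF_NOWAIT vfs_busy at line 684 cannot fail on a fresh mount) and still has mnt_vfs_ops == 1 from mount_init(), so the unlocked per-CPU paths stay disabled throughout setup. Condensed from the vfs_domount_first() matches below (lines 1177-1294), the happy-path contract:

        mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
        mp->mnt_optnew = *optlist;      /* consumed by VFS_MOUNT() */
        error = VFS_MOUNT(mp);
        ...
        vp->v_mountedhere = mp;         /* line 1262 */
        TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        vfs_op_exit(mp);                /* mnt_vfs_ops 1 -> 0: fast paths on */
        vfs_unbusy(mp);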
713 vfs_mount_destroy(struct mount *mp)
716 MPPASS(mp->mnt_vfs_ops != 0, mp);
718 vfs_assert_mount_counters(mp);
720 MNT_ILOCK(mp);
721 mp->mnt_kern_flag |= MNTK_REFEXPIRE;
722 if (mp->mnt_kern_flag & MNTK_MWAIT) {
723 mp->mnt_kern_flag &= ~MNTK_MWAIT;
724 wakeup(mp);
726 while (mp->mnt_ref)
727 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
728 KASSERT(mp->mnt_ref == 0,
731 MPPASS(mp->mnt_writeopcount == 0, mp);
732 MPPASS(mp->mnt_secondary_writes == 0, mp);
733 atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
734 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
737 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
741 KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending"));
742 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
743 KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify"));
744 MPPASS(mp->mnt_nvnodelistsize == 0, mp);
745 MPPASS(mp->mnt_lazyvnodelistsize == 0, mp);
746 MPPASS(mp->mnt_lockref == 0, mp);
747 MNT_IUNLOCK(mp);
749 MPASSERT(mp->mnt_vfs_ops == 1, mp,
750 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops));
752 MPASSERT(mp->mnt_rootvnode == NULL, mp,
753 ("mount point still has a root vnode %p", mp->mnt_rootvnode));
755 if (mp->mnt_vnodecovered != NULL)
756 vrele(mp->mnt_vnodecovered);
758 mac_mount_destroy(mp);
760 if (mp->mnt_opt != NULL)
761 vfs_freeopts(mp->mnt_opt);
762 if (mp->mnt_exjail != NULL) {
763 atomic_subtract_int(&mp->mnt_exjail->cr_prison->pr_exportcnt,
765 crfree(mp->mnt_exjail);
767 if (mp->mnt_export != NULL) {
768 vfs_free_addrlist(mp->mnt_export);
769 free(mp->mnt_export, M_MOUNT);
771 crfree(mp->mnt_cred);
772 uma_zfree(mount_zone, mp);
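vfs_mount_destroy() drains straggling references by setting MNTK_REFEXPIRE and sleeping on the mount itself (lines 721-727). The release side lives in the MNT_REL() macro in sys/mount.h, which is not among these matches; its assumed shape, enough to show how the sleeper at line 727 gets woken:

        #define MNT_REL(mp) do {                                        \
                KASSERT((mp)->mnt_ref > 0, ("negative mnt_ref"));       \
                (mp)->mnt_ref--;                                        \
                if ((mp)->mnt_ref == 0 &&                               \
                    ((mp)->mnt_kern_flag & MNTK_REFEXPIRE) != 0)        \
                        wakeup((mp));                                   \
        } while (0)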
981 * variables will fit in our mp buffers, including the
1120 struct mount *mp;
1177 mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
1179 mp->mnt_optnew = *optlist;
1181 mp->mnt_flag = (fsflags &
1191 if ((error = VFS_MOUNT(mp)) != 0 ||
1192 (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
1193 (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
1197 rootvp = vfs_cache_root_clear(mp);
1202 (void)vn_start_write(NULL, &mp, V_WAIT);
1203 MNT_ILOCK(mp);
1204 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF;
1205 MNT_IUNLOCK(mp);
1206 VFS_PURGE(mp);
1207 error = VFS_UNMOUNT(mp, 0);
1208 vn_finished_write(mp);
1217 vfs_unbusy(mp);
1218 mp->mnt_vnodecovered = NULL;
1221 vfs_mount_destroy(mp);
1237 if (mp->mnt_opt != NULL)
1238 vfs_freeopts(mp->mnt_opt);
1239 mp->mnt_opt = mp->mnt_optnew;
1245 mp->mnt_optnew = NULL;
1247 MNT_ILOCK(mp);
1248 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1249 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1250 mp->mnt_kern_flag |= MNTK_ASYNC;
1252 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1253 MNT_IUNLOCK(mp);
1262 vp->v_mountedhere = mp;
1280 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
1284 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td);
1286 mount_devctl_event("MOUNT", mp, false);
1291 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1292 vfs_allocate_syncvnode(mp);
1293 vfs_op_exit(mp);
1294 vfs_unbusy(mp);
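The vfs_domount_first() matches above include its failure unwind: a mount that got as far as a successful VFS_MOUNT() but then failed VFS_STATFS() or VFS_ROOT() must be torn back down as a forced unmount, not merely freed. Condensed from lines 1191-1221:

        if ((error = VFS_MOUNT(mp)) != 0 ||
            (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
            (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
                rootvp = vfs_cache_root_clear(mp);  /* released later, not shown */
                (void)vn_start_write(NULL, &mp, V_WAIT);
                MNT_ILOCK(mp);
                mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF;
                MNT_IUNLOCK(mp);
                VFS_PURGE(mp);
                error = VFS_UNMOUNT(mp, 0);
                vn_finished_write(mp);
                vfs_unbusy(mp);
                mp->mnt_vnodecovered = NULL;
                vfs_mount_destroy(mp);
        }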
1314 struct mount *mp;
1323 mp = vp->v_mount;
1339 flag = mp->mnt_flag;
1357 error = vfs_suser(mp, td);
1366 if (vfs_busy(mp, MBF_NOWAIT)) {
1373 vfs_unbusy(mp);
1382 vfs_op_enter(mp);
1391 if (fsidcmp(fsid_up, &mp->mnt_stat.f_fsid) != 0) {
1399 MNT_ILOCK(mp);
1400 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1401 MNT_IUNLOCK(mp);
1417 mp->mnt_flag |= MNT_UPDATE;
1419 mp->mnt_flag &= ~MNT_UPDATEMASK;
1420 if ((mp->mnt_flag & MNT_UNION) == 0 &&
1425 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
1427 if ((mp->mnt_flag & MNT_ASYNC) == 0)
1428 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1430 rootvp = vfs_cache_root_clear(mp);
1431 MNT_IUNLOCK(mp);
1432 mp->mnt_optnew = *optlist;
1433 vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
1450 error = VFS_MOUNT(mp);
1454 if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
1493 export_error = vfs_export(mp, &export, true);
1515 export_error = vfs_export(mp, &export, true);
1524 MNT_ILOCK(mp);
1526 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
1528 mp->mnt_flag |= mnt_union;
1537 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
1539 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1540 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1541 mp->mnt_kern_flag |= MNTK_ASYNC;
1543 mp->mnt_kern_flag &= ~MNTK_ASYNC;
1544 MNT_IUNLOCK(mp);
1549 mount_devctl_event("REMOUNT", mp, true);
1550 if (mp->mnt_opt != NULL)
1551 vfs_freeopts(mp->mnt_opt);
1552 mp->mnt_opt = mp->mnt_optnew;
1554 (void)VFS_STATFS(mp, &mp->mnt_stat);
1559 mp->mnt_optnew = NULL;
1561 if ((mp->mnt_flag & MNT_RDONLY) == 0)
1562 vfs_allocate_syncvnode(mp);
1564 vfs_deallocate_syncvnode(mp);
1566 vfs_op_exit(mp);
1572 vfs_unbusy(mp);
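vfs_domount_update() brackets the whole update with a flag snapshot and rollback: mnt_flag is saved before MNT_UPDATE is set (line 1339) and, if VFS_MOUNT() or the export update fails, restored except for MNT_QUOTA, which the filesystem may have changed on its own (line 1537). The bracket, condensed:

        flag = mp->mnt_flag;                    /* snapshot */
        ...
        mp->mnt_flag |= MNT_UPDATE;
        error = VFS_MOUNT(mp);
        ...
        if (error != 0 || export_error != 0)    /* roll back on failure */
                mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);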
1601 * variables will fit in our mp buffers, including the
1726 struct mount *mp;
1753 mp = vfs_getvfs(&fsid);
1755 if (mp == NULL) {
1779 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1780 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1781 vfs_ref(mp);
1787 if (mp == NULL) {
1801 if (mp->mnt_flag & MNT_ROOTFS) {
1802 vfs_rel(mp);
1805 error = dounmount(mp, flags, td);
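sys_unmount() resolves its target either by fsid via vfs_getvfs() (line 1753) or, given a path, by walking mountlist newest-first so that the topmost of several mounts stacked on the same mountpoint is chosen; the root filesystem is refused outright (line 1801). The path lookup, condensed from lines 1779-1781:

        TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
                if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
                        vfs_ref(mp);
                        break;
                }
        }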
1817 vfs_check_usecounts(struct mount *mp)
1821 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1825 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
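vfs_check_usecounts() (lines 1817-1825) is a canonical user of the per-mount vnode iterator: the macro returns each vnode with its interlock held, and an early exit has to go through the _ABORT variant so the iterator's internal marker vnode is unlinked from the list. The shape, with a placeholder predicate (the real test, on surrounding lines not matched here, compares use and write counts):

        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                if (vnode_busier_than_expected(vp)) {   /* placeholder */
                        VI_UNLOCK(vp);
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        return (EBUSY);
                }
                VI_UNLOCK(vp);
        }
        return (0);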
1835 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
1838 mtx_assert(MNT_MTX(mp), MA_OWNED);
1839 mp->mnt_kern_flag &= ~mntkflags;
1840 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1841 mp->mnt_kern_flag &= ~MNTK_MWAIT;
1842 wakeup(mp);
1844 vfs_op_exit_locked(mp);
1845 MNT_IUNLOCK(mp);
1850 vn_finished_write(mp);
1851 vfs_rel(mp);
1862 vfs_op_enter(struct mount *mp)
1867 MNT_ILOCK(mp);
1868 mp->mnt_vfs_ops++;
1869 if (mp->mnt_vfs_ops > 1) {
1870 MNT_IUNLOCK(mp);
1873 vfs_op_barrier_wait(mp);
1875 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1877 mp->mnt_ref += mpcpu->mntp_ref;
1880 mp->mnt_lockref += mpcpu->mntp_lockref;
1883 mp->mnt_writeopcount += mpcpu->mntp_writeopcount;
1886 MPASSERT(mp->mnt_ref > 0 && mp->mnt_lockref >= 0 &&
1887 mp->mnt_writeopcount >= 0, mp,
1889 mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount));
1890 MNT_IUNLOCK(mp);
1891 vfs_assert_mount_counters(mp);
1895 vfs_op_exit_locked(struct mount *mp)
1898 mtx_assert(MNT_MTX(mp), MA_OWNED);
1900 MPASSERT(mp->mnt_vfs_ops > 0, mp,
1901 ("invalid vfs_ops count %d", mp->mnt_vfs_ops));
1902 MPASSERT(mp->mnt_vfs_ops > 1 ||
1903 (mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_SUSPEND)) == 0, mp,
1904 ("vfs_ops too low %d in unmount or suspend", mp->mnt_vfs_ops));
1905 mp->mnt_vfs_ops--;
1909 vfs_op_exit(struct mount *mp)
1912 MNT_ILOCK(mp);
1913 vfs_op_exit_locked(mp);
1914 MNT_IUNLOCK(mp);
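vfs_op_enter() (lines 1862-1891) is the switch from per-CPU to centralized counting: the first caller bumps mnt_vfs_ops, waits out threads already inside a fast path via the smp_rendezvous-based vfs_op_barrier_wait() (lines 1951-1955 below), then folds every CPU's deltas, which may individually be negative, into the central fields under MNT_ILOCK. Condensed, with the per-CPU zeroing assumed:

        MNT_ILOCK(mp);
        mp->mnt_vfs_ops++;
        if (mp->mnt_vfs_ops > 1) {
                MNT_IUNLOCK(mp);
                return;                         /* slow mode already active */
        }
        vfs_op_barrier_wait(mp);                /* IPI barrier */
        CPU_FOREACH(cpu) {
                mpcpu = vfs_mount_pcpu_remote(mp, cpu);
                mp->mnt_ref += mpcpu->mntp_ref;
                mpcpu->mntp_ref = 0;
                /* likewise mntp_lockref and mntp_writeopcount */
        }
        MNT_IUNLOCK(mp);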
1918 struct mount *mp;
1926 struct mount *mp;
1929 mp = vfsopipi->mp;
1931 if (!vfs_op_thread_entered(mp))
1939 struct mount *mp;
1943 mp = vfsopipi->mp;
1945 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1951 vfs_op_barrier_wait(struct mount *mp)
1955 vfsopipi.mp = mp;
1967 vfs_assert_mount_counters(struct mount *mp)
1972 if (mp->mnt_vfs_ops == 0)
1976 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
1980 vfs_dump_mount_counters(mp);
1985 vfs_dump_mount_counters(struct mount *mp)
1991 printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
1994 ref = mp->mnt_ref;
1996 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
2002 lockref = mp->mnt_lockref;
2004 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
2010 writeopcount = mp->mnt_writeopcount;
2012 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
2019 printf("ref %-5d %-5d\n", mp->mnt_ref, ref);
2020 printf("lockref %-5d %-5d\n", mp->mnt_lockref, lockref);
2021 printf("writeopcount %-5d %-5d\n", mp->mnt_writeopcount, writeopcount);
2028 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
2035 sum = mp->mnt_ref;
2038 sum = mp->mnt_lockref;
2041 sum = mp->mnt_writeopcount;
2046 mpcpu = vfs_mount_pcpu_remote(mp, cpu);
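While fast paths are live, a counter's true value is the central field plus every CPU's outstanding delta; vfs_mount_fetch_counter() just sums them. Condensed from lines 2035-2046 (without vfs_op_enter() first, the result is only a snapshot):

        sum = mp->mnt_ref;              /* or mnt_lockref / mnt_writeopcount */
        CPU_FOREACH(cpu) {
                mpcpu = vfs_mount_pcpu_remote(mp, cpu);
                sum += mpcpu->mntp_ref;
        }
        return (sum);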
2063 deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue,
2070 if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) {
2071 mp->mnt_taskqueue_flags = flags | MNT_DEFERRED;
2072 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp,
2094 struct mount *mp, *tmp;
2104 STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) {
2105 flags = mp->mnt_taskqueue_flags;
2108 error = dounmount(mp, flags, curthread);
2110 MNT_ILOCK(mp);
2111 unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0);
2112 MNT_IUNLOCK(mp);
2119 retries = (mp->mnt_unmount_retries)++;
2122 deferred_unmount_enqueue(mp, flags, true,
2128 mp->mnt_stat.f_mntonname, retries, error);
2130 vfs_rel(mp);
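The deferred-unmount taskqueue retries failed unmounts with a bounded count. Condensed from lines 2104-2130; retry_limit and retry_delay stand in for the real tunables, whose names do not appear in these matches:

        error = dounmount(mp, flags, curthread);
        if (error != 0) {
                MNT_ILOCK(mp);
                unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0);
                MNT_IUNLOCK(mp);
                retries = (mp->mnt_unmount_retries)++;
                if (!unmounted && retries < retry_limit) {
                        /* keep the enqueue-time reference and try again */
                        deferred_unmount_enqueue(mp, flags, true, retry_delay);
                        continue;
                }
                printf("deferred unmount of %s failed (retries %d, error %d)\n",
                    mp->mnt_stat.f_mntonname, retries, error);
        }
        vfs_rel(mp);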
2140 dounmount(struct mount *mp, uint64_t flags, struct thread *td)
2164 if (!deferred_unmount_enqueue(mp, flags, false, 0))
2165 vfs_rel(mp);
2179 error = vfs_suser(mp, td);
2183 vfs_rel(mp);
2194 MNT_ILOCK(mp);
2202 mp->mnt_kern_flag |= MNTK_RECURSE;
2203 mp->mnt_upper_pending++;
2204 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) {
2205 retries = upper->mp->mnt_unmount_retries;
2210 MNT_IUNLOCK(mp);
2212 vfs_ref(upper->mp);
2213 if (!deferred_unmount_enqueue(upper->mp, flags,
2215 vfs_rel(upper->mp);
2216 MNT_ILOCK(mp);
2218 mp->mnt_upper_pending--;
2219 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
2220 mp->mnt_upper_pending == 0) {
2221 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
2222 wakeup(&mp->mnt_uppers);
2232 while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) {
2233 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER;
2234 error = msleep(&mp->mnt_taskqueue_link,
2235 MNT_MTX(mp), PCATCH, "umntqw", 0);
2238 MNT_REL(mp);
2239 MNT_IUNLOCK(mp);
2242 } else if (!TAILQ_EMPTY(&mp->mnt_uppers)) {
2243 MNT_IUNLOCK(mp);
2245 deferred_unmount_enqueue(mp, flags, true, 0);
2248 MNT_IUNLOCK(mp);
2249 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty"));
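A recursive unmount (lines 2194-2249) first marks itself MNTK_RECURSE, which makes new registrations fail the check at line 563, then queues every registered upper mount for deferred unmount with an extra reference each, and finally sleeps until mnt_uppers drains. Condensed (the drop/retake of MNT_ILOCK around the list walk is elided):

        mp->mnt_kern_flag |= MNTK_RECURSE;
        TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) {
                vfs_ref(upper->mp);
                if (!deferred_unmount_enqueue(upper->mp, flags, false, 0))
                        vfs_rel(upper->mp);     /* already queued */
        }
        while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) {
                mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER;
                error = msleep(&mp->mnt_taskqueue_link, MNT_MTX(mp),
                    PCATCH, "umntqw", 0);
        }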
2254 vfs_ref(mp);
2256 if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
2257 mnt_gen_r = mp->mnt_gen;
2262 * Check for mp being unmounted while waiting for the
2265 if (coveredvp->v_mountedhere != mp ||
2269 vfs_rel(mp);
2274 vfs_op_enter(mp);
2276 vn_start_write(NULL, &mp, V_WAIT);
2277 MNT_ILOCK(mp);
2278 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
2279 (mp->mnt_flag & MNT_UPDATE) != 0 ||
2280 !TAILQ_EMPTY(&mp->mnt_uppers)) {
2281 dounmount_cleanup(mp, coveredvp, 0);
2284 mp->mnt_kern_flag |= MNTK_UNMOUNT;
2285 rootvp = vfs_cache_root_clear(mp);
2289 MNT_IUNLOCK(mp);
2290 error = vfs_check_usecounts(mp);
2291 MNT_ILOCK(mp);
2294 dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
2304 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
2305 MNT_IUNLOCK(mp);
2310 VFS_PURGE(mp);
2311 MNT_ILOCK(mp);
2314 if (mp->mnt_lockref) {
2315 mp->mnt_kern_flag |= MNTK_DRAINING;
2316 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
2319 MNT_IUNLOCK(mp);
2320 KASSERT(mp->mnt_lockref == 0,
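Draining mnt_lockref (lines 2314-2320) is the same handshake as the upper-pending drain above: the unmounting thread sets MNTK_DRAINING and sleeps on &mp->mnt_lockref. The waker is vfs_unbusy(), which is not among these matches; its assumed shape on the slow path:

        if (--mp->mnt_lockref == 0 &&
            (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
                mp->mnt_kern_flag &= ~MNTK_DRAINING;
                wakeup(&mp->mnt_lockref);
        }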
2337 if (mp->mnt_flag & MNT_EXPUBLIC)
2340 vfs_periodic(mp, MNT_WAIT);
2341 MNT_ILOCK(mp);
2342 async_flag = mp->mnt_flag & MNT_ASYNC;
2343 mp->mnt_flag &= ~MNT_ASYNC;
2344 mp->mnt_kern_flag &= ~MNTK_ASYNC;
2345 MNT_IUNLOCK(mp);
2346 vfs_deallocate_syncvnode(mp);
2347 error = VFS_UNMOUNT(mp, flags);
2348 vn_finished_write(mp);
2349 vfs_rel(mp);
2357 MNT_ILOCK(mp);
2358 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
2359 MNT_IUNLOCK(mp);
2360 vfs_allocate_syncvnode(mp);
2361 MNT_ILOCK(mp);
2363 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
2364 mp->mnt_flag |= async_flag;
2365 if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
2366 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
2367 mp->mnt_kern_flag |= MNTK_ASYNC;
2368 if (mp->mnt_kern_flag & MNTK_MWAIT) {
2369 mp->mnt_kern_flag &= ~MNTK_MWAIT;
2370 wakeup(mp);
2372 vfs_op_exit_locked(mp);
2373 MNT_IUNLOCK(mp);
2387 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2389 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
2399 mount_devctl_event("UNMOUNT", mp, false);
2405 if (rootvnode != NULL && mp == rootvnode->v_mount) {
2409 if (mp == rootdevmp)
2412 vfs_rel(mp);
2413 vfs_mount_destroy(mp);
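The success-path teardown order, condensed from lines 2387-2413: the mount leaves the global mountlist before the unmounted event and devctl notification fire, the reference taken at line 2254 is dropped, and vfs_mount_destroy() then drains whatever remains:

        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td);
        mount_devctl_event("UNMOUNT", mp, false);
        vfs_rel(mp);
        vfs_mount_destroy(mp);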
2421 vfs_mount_error(struct mount *mp, const char *fmt, ...)
2423 struct vfsoptlist *moptlist = mp->mnt_optnew;
2751 __vfs_statfs(struct mount *mp, struct statfs *sbp)
2757 if (sbp != &mp->mnt_stat)
2758 memcpy(sbp, &mp->mnt_stat, sizeof(*sbp));
2765 sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
2766 sbp->f_nvnodelistsize = mp->mnt_nvnodelistsize;
2768 return (mp->mnt_op->vfs_statfs(mp, sbp));
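__vfs_statfs() copies the cached mnt_stat, patches the kernel-owned fields (f_flags, f_nvnodelistsize), and only then delegates to the filesystem through mp->mnt_op->vfs_statfs (line 2768). A hypothetical method on the receiving end, just to show what the delegation expects (all myfs names are illustrative):

        static int
        myfs_statfs(struct mount *mp, struct statfs *sbp)
        {
                struct myfs_mount *mmp = mp->mnt_data;  /* hypothetical softc */

                /* only the volatile space/inode fields are ours to fill */
                sbp->f_bsize = mmp->mm_bsize;
                sbp->f_blocks = mmp->mm_blocks;
                sbp->f_bfree = sbp->f_bavail = mmp->mm_bfree;
                sbp->f_files = mmp->mm_files;
                sbp->f_ffree = mmp->mm_ffree;
                return (0);
        }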
2772 vfs_mountedfrom(struct mount *mp, const char *from)
2775 bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
2776 strlcpy(mp->mnt_stat.f_mntfromname, from,
2777 sizeof mp->mnt_stat.f_mntfromname);
2965 mount_devctl_event(const char *type, struct mount *mp, bool donew)
2970 struct statfs *sfp = &mp->mnt_stat;
2989 if ((mp->mnt_flag & fp->o_opt) != 0) {
3020 vfs_remount_ro(struct mount *mp)
3027 vfs_op_enter(mp);
3028 KASSERT(mp->mnt_lockref > 0,
3029 ("vfs_remount_ro: mp %p is not busied", mp));
3030 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0,
3031 ("vfs_remount_ro: mp %p is being unmounted (and busy?)", mp));
3034 vp_covered = mp->mnt_vnodecovered;
3037 vfs_op_exit(mp);
3044 vfs_op_exit(mp);
3051 MNT_ILOCK(mp);
3052 if ((mp->mnt_flag & MNT_RDONLY) != 0) {
3053 MNT_IUNLOCK(mp);
3057 mp->mnt_flag |= MNT_UPDATE | MNT_FORCE | MNT_RDONLY;
3058 rootvp = vfs_cache_root_clear(mp);
3059 MNT_IUNLOCK(mp);
3067 vfs_mergeopts(opts, mp->mnt_opt);
3068 mp->mnt_optnew = opts;
3070 error = VFS_MOUNT(mp);
3073 MNT_ILOCK(mp);
3074 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE);
3075 MNT_IUNLOCK(mp);
3076 vfs_deallocate_syncvnode(mp);
3077 if (mp->mnt_opt != NULL)
3078 vfs_freeopts(mp->mnt_opt);
3079 mp->mnt_opt = mp->mnt_optnew;
3081 MNT_ILOCK(mp);
3082 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE | MNT_RDONLY);
3083 MNT_IUNLOCK(mp);
3084 vfs_freeopts(mp->mnt_optnew);
3086 mp->mnt_optnew = NULL;
3089 vfs_op_exit(mp);
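vfs_remount_ro() expects a busied mount that is not mid-unmount (the KASSERTs at lines 3028-3031) and downgrades it to read-only in place by running a forced MNT_UPDATE with an "ro" option list merged over the existing one (lines 3057-3070). A caller-side sketch:

        if (vfs_busy(mp, 0) == 0) {
                error = vfs_remount_ro(mp);
                vfs_unbusy(mp);
        }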
3114 struct mount *mp;
3118 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
3119 error = vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT);
3122 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL ||
3123 (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
3125 vfs_unbusy(mp);
3128 error = vfs_write_suspend(mp, 0);
3130 MNT_ILOCK(mp);
3131 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0);
3132 mp->mnt_kern_flag |= MNTK_SUSPEND_ALL;
3133 MNT_IUNLOCK(mp);
3137 mp->mnt_stat.f_mntonname, error);
3139 vfs_unbusy(mp);
3183 struct mount *mp;
3186 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3187 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0)
3190 MNT_ILOCK(mp);
3191 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0);
3192 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL;
3193 MNT_IUNLOCK(mp);
3194 vfs_write_resume(mp, 0);
3196 vfs_unbusy(mp);
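The suspend/resume pairing, condensed from lines 3114-3139 and 3183-3196: suspension walks mountlist in reverse so upper mounts quiesce before what they sit on, skips read-only, non-local, and already-suspended filesystems, and leaves each suspended mount busied and tagged MNTK_SUSPEND_ALL; resume walks forward, touching only tagged mounts:

        /* suspend side, per mount (mount left busied on success) */
        if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) == MNT_LOCAL &&
            (mp->mnt_kern_flag & MNTK_SUSPEND) == 0 &&
            vfs_write_suspend(mp, 0) == 0) {
                MNT_ILOCK(mp);
                mp->mnt_kern_flag |= MNTK_SUSPEND_ALL;
                MNT_IUNLOCK(mp);
        }

        /* resume side, per mount */
        if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) != 0) {
                MNT_ILOCK(mp);
                mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL;
                MNT_IUNLOCK(mp);
                vfs_write_resume(mp, 0);
                vfs_unbusy(mp);         /* suspend pass kept it busied */
        }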