/*
 * Copyright (c) 2004,2013-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
		    mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.  Or, if he wishes to return a normal locked
 * vnode, call vx_downgrade(vp) to downgrade the VX lock to a normal
 * VN lock.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
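
/*
 * Usage sketch (illustrative, not part of this file): a filesystem
 * creating a vnode for a freshly loaded inode.  "ip" and the VT_UFS
 * tag are hypothetical stand-ins for the caller's own data.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
 *	if (error == 0) {
 *		vp->v_data = ip;	(attach fs-private data)
 *		vp->v_type = VREG;	(leaves the inert VNON state)
 *		vx_downgrade(vp);	(VX lock -> normal VN lock)
 *	}
 *
 * Alternatively the caller can vx_put(vp) to drop both the lock and
 * the ref, or vx_unlock(vp) to keep a vref across the unlock, per the
 * comment above.
 */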

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
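
/*
 * Usage sketch (illustrative): bracketing work with the busy interlock
 * so the filesystem cannot be unmounted while we operate on it.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		(safe to use mp here; unmount is held off by the
 *		 shared busy lock)
 *		vfs_unbusy(mp);
 *	}
 *
 * Passing 0 instead of LK_NOWAIT sleeps through an in-progress
 * unmount before returning ENOENT.
 */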

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
	lockinit(&mp->mnt_renlock, "renamlk", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lockinit(&mp->mnt_renlock, "renamlk", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_op = ops;
	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
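
/*
 * Usage sketch (illustrative): the lookup must be paired with a
 * mount_drop() since the returned mount is held.
 *
 *	struct mount *mp;
 *
 *	mp = vfs_getvfs(&fsid);
 *	if (mp != NULL) {
 *		(use mp; the hold keeps it from being freed)
 *		mount_drop(mp);
 *	}
 */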

/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		/*kprintf("getnewfsid %08x %08x %s\n", tfsid.val[0], tfsid.val[1], retbuf);*/
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}

		/*
		 * Do not allow this thread to become cpu-bound if something
		 * goes wrong.
		 */
		tsleep(vnlruthread, 0, "vlruwt", 1);
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  Filesystem should attempt to
 * supply a unique fsid but if a duplicate occurs adjust the fsid to ensure
 * uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}
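
/*
 * Worked example of the collision loop above: the udev minor number
 * occupies the discontiguous bit mask 0xFFFF00FF, so the loop compacts
 * the minor into a contiguous 24-bit value, increments it, and expands
 * it back.  Starting from val[0] = 0x00010000:
 *
 *	compact: ((0x00010000 & 0xFFFF0000) >> 8) | 0x00  -> 0x00000100
 *	++val:						  -> 0x00000101
 *	expand:	 ((0x00000101 << 8) & 0xFFFF0000) | 0x01  -> 0x00010001
 *
 * Successive collisions therefore walk through all 2^24 encodable
 * minors (matching lim = 0x01000000) before val[1] is perturbed.
 */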

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
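
/*
 * Usage sketch (illustrative): a minimal interlock callback.
 * "unmount_interlock" is a hypothetical name.
 *
 *	static int
 *	unmount_interlock(struct mount *mp)
 *	{
 *		if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *			return (EBUSY);
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(unmount_interlock, mp);
 *
 * The callback runs with mountlist_token held, serialized against
 * other interlocked callers, so it must not block for long.
 */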

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return (TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly
 * used by the VFS quota code to check if a cached nullfs struct mount
 * pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return (node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback unless
 *	 MNTSCAN_NOUNLOCK is specified.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return (res);
}
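
/*
 * Usage sketch (illustrative): totaling a per-mount quantity across
 * all mounts.  "count_vnodes_cb" is a hypothetical callback; each
 * mount is busied around the call since MNTSCAN_NOBUSY is not given.
 *
 *	static int
 *	count_vnodes_cb(struct mount *mp, void *data)
 *	{
 *		*(int *)data += mp->mnt_nvnodelistsize;
 *		return (0);	(values >= 0 are summed into the result)
 *	}
 *
 *	int total = 0;
 *
 *	mountlist_scan(count_vnodes_cb, &total, MNTSCAN_FORWARD);
 */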

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mount's mnt_token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mnt_token
 * is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating the vnodes that
	 * were available when we started.  Under heavy loads new vnodes
	 * can be added while we are scanning, so this isn't perfect.
	 * Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch (flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch (flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return (r);
}
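
/*
 * Usage sketch (illustrative): a scan pairing an unlocked filter with
 * a locked worker.  "myfs_fast" and "myfs_slow" are hypothetical.
 *
 *	static int
 *	myfs_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_type != VREG)
 *			return (-1);	(skip; slowfunc not called)
 *		return (0);		(fall through to slowfunc)
 *	}
 *
 *	static int
 *	myfs_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		(vp is vget()'d and exclusively locked here; this
 *		 function may block)
 *		return (0);		(non-zero terminates the scan)
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, myfs_fast, myfs_slow, NULL);
 */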

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
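
/*
 * Usage sketch (illustrative): a typical filesystem unmount path.
 * Whether rootrefs is 0 or 1 depends on whether the filesystem keeps
 * a cached reference on its root vnode; 0 is shown here.
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, 0, flags);
 *	if (error)
 *		return (error);		(EBUSY: active vnodes remain)
 */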

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	     (VOP_GETATTR(vp, &vattr) == 0 &&
	      vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return (0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return (0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}