1 /* $OpenBSD: vfs_subr.c,v 1.327 2025/01/02 10:07:18 dlg Exp $ */
2 /* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
3
4 /*
5 * Copyright (c) 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
38 */
39
40 /*
41 * External virtual filesystem routines
42 */
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/sysctl.h>
48 #include <sys/mount.h>
49 #include <sys/fcntl.h>
50 #include <sys/conf.h>
51 #include <sys/vnode.h>
52 #include <sys/lock.h>
53 #include <sys/lockf.h>
54 #include <sys/stat.h>
55 #include <sys/acct.h>
56 #include <sys/namei.h>
57 #include <sys/ucred.h>
58 #include <sys/buf.h>
59 #include <sys/errno.h>
60 #include <sys/malloc.h>
61 #include <sys/mbuf.h>
62 #include <sys/syscallargs.h>
63 #include <sys/pool.h>
64 #include <sys/tree.h>
65 #include <sys/specdev.h>
66 #include <sys/atomic.h>
67
68 #include <netinet/in.h>
69
70 #include <uvm/uvm_extern.h>
71 #include <uvm/uvm_vnode.h>
72
73 #include "softraid.h"
74
75 /*
76 * Locks used to protect data:
77 * a atomic
78 */
79
80 void sr_quiesce(void);
81
82 enum vtype iftovt_tab[16] = {
83 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
84 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
85 };
86
87 int vttoif_tab[9] = {
88 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
89 S_IFSOCK, S_IFIFO, S_IFMT,
90 };
91
92 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
93 int suid_clear = 1; /* [a] 1 => clear SUID / SGID on owner change */
94
95 /*
96 * Insq/Remq for the vnode usage lists.
97 */
98 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
99 #define bufremvn(bp) { \
100 LIST_REMOVE(bp, b_vnbufs); \
101 LIST_NEXT(bp, b_vnbufs) = NOLIST; \
102 }
103
104 TAILQ_HEAD(freelst, vnode);
105 struct freelst vnode_hold_list; /* list of vnodes referencing buffers */
106 struct freelst vnode_free_list; /* vnode free list */
107
108 struct mntlist mountlist; /* mounted filesystem list */
109
110 void vclean(struct vnode *, int, struct proc *);
111
112 void insmntque(struct vnode *, struct mount *);
113 int getdevvp(dev_t, struct vnode **, enum vtype);
114
115 int vfs_hang_addrlist(struct mount *, struct netexport *,
116 struct export_args *);
117 int vfs_free_netcred(struct radix_node *, void *, u_int);
118 void vfs_free_addrlist(struct netexport *);
119 void vputonfreelist(struct vnode *);
120
121 int vflush_vnode(struct vnode *, void *);
122 int maxvnodes;
123
124 struct mutex vnode_mtx = MUTEX_INITIALIZER(IPL_BIO);
125
126 void vfs_unmountall(void);
127
128 #ifdef DEBUG
129 void printlockedvnodes(void);
130 #endif
131
132 struct pool vnode_pool;
133 struct pool uvm_vnode_pool;
134
135 static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
136 RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
137
138 static inline int
139 rb_buf_compare(const struct buf *b1, const struct buf *b2)
140 {
141 if (b1->b_lblkno < b2->b_lblkno)
142 return(-1);
143 if (b1->b_lblkno > b2->b_lblkno)
144 return(1);
145 return(0);
146 }
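/*
 * Illustrative sketch (not part of this file): the per-vnode buffer tree
 * is keyed by logical block number, so an in-core lookup can be done with
 * RBT_FIND() and a stack buf that carries only the key.  The helper name
 * below is hypothetical; callers would hold splbio() while searching.
 *
 *	struct buf *
 *	example_incore(struct vnode *vp, daddr_t lblkno)
 *	{
 *		struct buf key;
 *
 *		key.b_lblkno = lblkno;
 *		return (RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key));
 *	}
 */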
147
148 /*
149 * Initialize the vnode management data structures.
150 */
151 void
152 vntblinit(void)
153 {
154 /* buffer cache may need a vnode for each buffer */
155 maxvnodes = 2 * initialvnodes;
156 pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
157 PR_WAITOK, "vnodes", NULL);
158 pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
159 PR_WAITOK, "uvmvnodes", NULL);
160 TAILQ_INIT(&vnode_hold_list);
161 TAILQ_INIT(&vnode_free_list);
162 TAILQ_INIT(&mountlist);
163 /*
164 * Initialize the filesystem syncer.
165 */
166 vn_initialize_syncerd();
167
168 #ifdef NFSSERVER
169 rn_init(sizeof(struct sockaddr_in));
170 #endif /* NFSSERVER */
171 }
172
173 /*
174 * Allocate a mount point.
175 *
176 * The returned mount point is marked as busy.
177 */
178 struct mount *
179 vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp)
180 {
181 struct mount *mp;
182
183 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
184 refcnt_init(&mp->mnt_refs);
185 rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
186 (void)vfs_busy(mp, VB_READ|VB_NOWAIT);
187
188 TAILQ_INIT(&mp->mnt_vnodelist);
189 mp->mnt_vnodecovered = vp;
190
191 atomic_inc_int(&vfsp->vfc_refcount);
192 mp->mnt_vfc = vfsp;
193 mp->mnt_op = vfsp->vfc_vfsops;
194 mp->mnt_flag = vfsp->vfc_flags;
195 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
196
197 return (mp);
198 }
199
200 struct mount *
201 vfs_mount_take(struct mount *mp)
202 {
203 refcnt_take(&mp->mnt_refs);
204 return (mp);
205 }
206
207 static void
208 vfs_mount_rele(struct mount *mp)
209 {
210 if (refcnt_rele(&mp->mnt_refs))
211 free(mp, M_MOUNT, sizeof(*mp));
212 }
213
214 /*
215 * Release a mount point.
216 */
217 void
218 vfs_mount_free(struct mount *mp)
219 {
220 SET(mp->mnt_flag, MNT_UNMOUNT);
221 atomic_dec_int(&mp->mnt_vfc->vfc_refcount);
222 vfs_mount_rele(mp);
223 }
224
225 /*
226 * Mark a mount point as busy. Used to synchronize access and to delay
227 * unmounting.
228 *
229 * Default behaviour is to attempt getting a READ lock and in case of an
230 * ongoing unmount, to wait for it to finish and then return failure.
231 */
232 int
233 vfs_busy(struct mount *mp, int flags)
234 {
235 int rwflags = ISSET(flags, VB_WRITE) ? RW_WRITE : RW_READ;
236 int error = 0;
237
238 if (!ISSET(flags, VB_WAIT))
239 rwflags |= RW_NOSLEEP;
240
241 #ifdef WITNESS
242 if (ISSET(flags, VB_DUPOK))
243 rwflags |= RW_DUPOK;
244 #endif
245
246 vfs_mount_take(mp);
247 if (rw_enter(&mp->mnt_lock, rwflags) != 0)
248 error = EBUSY;
249 else if (ISSET(mp->mnt_flag, MNT_UNMOUNT)) {
250 rw_exit(&mp->mnt_lock);
251 error = EBUSY;
252 }
253 vfs_mount_rele(mp);
254
255 return (error);
256 }
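/*
 * Illustrative sketch (not part of this file): a typical reader of the
 * mount list busies each mount point before touching its vnode list and
 * releases it with vfs_unbusy():
 *
 *	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 *		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *			continue;
 *		... walk mp->mnt_vnodelist ...
 *		vfs_unbusy(mp);
 *	}
 *
 * This is the same pattern printlockedvnodes() below uses.
 */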
257
258 /*
259 * Free a busy file system
260 */
261 void
262 vfs_unbusy(struct mount *mp)
263 {
264 rw_exit(&mp->mnt_lock);
265 }
266
267 int
268 vfs_isbusy(struct mount *mp)
269 {
270 return (rw_status(&mp->mnt_lock) != 0);
271 }
272
273 /*
274 * Lookup a filesystem type, and if found allocate and initialize
275 * a mount structure for it.
276 *
277 * Devname is usually updated by mount(8) after booting.
278 */
279 int
280 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
281 {
282 struct vfsconf *vfsp;
283 struct mount *mp;
284
285 vfsp = vfs_byname(fstypename);
286 if (vfsp == NULL)
287 return (ENODEV);
288 mp = vfs_mount_alloc(NULLVP, vfsp);
289 mp->mnt_flag |= MNT_RDONLY;
290 mp->mnt_stat.f_mntonname[0] = '/';
291 strlcpy(mp->mnt_stat.f_mntfromname, devname, MNAMELEN);
292 strlcpy(mp->mnt_stat.f_mntfromspec, devname, MNAMELEN);
293 *mpp = mp;
294 return (0);
295 }
296
297 /*
298 * Lookup a mount point by filesystem identifier.
299 */
300 struct mount *
301 vfs_getvfs(fsid_t *fsid)
302 {
303 struct mount *mp;
304
305 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
306 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
307 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
308 return (mp);
309 }
310 }
311
312 return (NULL);
313 }
314
315
316 /*
317 * Get a new unique fsid
318 */
319 void
320 vfs_getnewfsid(struct mount *mp)
321 {
322 static u_short xxxfs_mntid;
323
324 fsid_t tfsid;
325 int mtype;
326
327 mtype = mp->mnt_vfc->vfc_typenum;
328 mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
329 mp->mnt_stat.f_fsid.val[1] = mtype;
330 if (xxxfs_mntid == 0)
331 ++xxxfs_mntid;
332 tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
333 tfsid.val[1] = mtype;
334 if (!TAILQ_EMPTY(&mountlist)) {
335 while (vfs_getvfs(&tfsid)) {
336 tfsid.val[0]++;
337 xxxfs_mntid++;
338 }
339 }
340 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
341 }
342
343 /*
344 * Set vnode attributes to VNOVAL
345 */
346 void
347 vattr_null(struct vattr *vap)
348 {
349
350 vap->va_type = VNON;
351 /*
352 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
353 * with 2^31-1 instead of 2^64-1. Just write'm out and let
354 * the compiler do its job.
355 */
356 vap->va_mode = VNOVAL;
357 vap->va_nlink = VNOVAL;
358 vap->va_uid = VNOVAL;
359 vap->va_gid = VNOVAL;
360 vap->va_fsid = VNOVAL;
361 vap->va_fileid = VNOVAL;
362 vap->va_size = VNOVAL;
363 vap->va_blocksize = VNOVAL;
364 vap->va_atime.tv_sec = VNOVAL;
365 vap->va_atime.tv_nsec = VNOVAL;
366 vap->va_mtime.tv_sec = VNOVAL;
367 vap->va_mtime.tv_nsec = VNOVAL;
368 vap->va_ctime.tv_sec = VNOVAL;
369 vap->va_ctime.tv_nsec = VNOVAL;
370 vap->va_gen = VNOVAL;
371 vap->va_flags = VNOVAL;
372 vap->va_rdev = VNOVAL;
373 vap->va_bytes = VNOVAL;
374 vap->va_filerev = VNOVAL;
375 vap->va_vaflags = 0;
376 }
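/*
 * Illustrative sketch (not part of this file): callers that only want to
 * change a few attributes reset everything to VNOVAL first and then fill
 * in the fields they care about before calling VOP_SETATTR():
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 *
 * Fields left at VNOVAL are ignored by the filesystem.
 */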
377
378 /*
379 * Routines having to do with the management of the vnode table.
380 */
381 long numvnodes;
382
383 /*
384 * Return the next vnode from the free list.
385 */
386 int
387 getnewvnode(enum vtagtype tag, struct mount *mp, const struct vops *vops,
388 struct vnode **vpp)
389 {
390 struct proc *p = curproc;
391 struct freelst *listhd;
392 static int toggle;
393 struct vnode *vp;
394 int s;
395
396 /*
397 * allow maxvnodes to increase if the buffer cache itself
398 * is big enough to justify it. (we don't shrink it ever)
399 */
400 maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
401 : maxvnodes;
402
403 /*
404 * We must choose whether to allocate a new vnode or recycle an
405 * existing one. The criterion for allocating a new one is that
406 * the total number of vnodes is less than the number desired or
407 * there are no vnodes on either free list. Generally we only
408 * want to recycle vnodes that have no buffers associated with
409 * them, so we look first on the vnode_free_list. If it is empty,
410 * we next consider vnodes with referencing buffers on the
411 * vnode_hold_list. The toggle ensures that half the time we
412 * will use a buffer from the vnode_hold_list, and half the time
413 * we will allocate a new one unless the list has grown to twice
414 * the desired size. We are reticent to recycle vnodes from the
415 * vnode_hold_list because we will lose the identity of all its
416 * referencing buffers.
417 */
418 toggle ^= 1;
419 if (numvnodes / 2 > maxvnodes)
420 toggle = 0;
421
422 s = splbio();
423 if ((numvnodes < maxvnodes) ||
424 ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
425 ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
426 splx(s);
427 vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
428 vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
429 vp->v_uvm->u_vnode = vp;
430 uvm_obj_init(&vp->v_uvm->u_obj, &uvm_vnodeops, 0);
431 RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
432 cache_tree_init(&vp->v_nc_tree);
433 TAILQ_INIT(&vp->v_cache_dst);
434 numvnodes++;
435 } else {
436 TAILQ_FOREACH(vp, listhd, v_freelist) {
437 if (VOP_ISLOCKED(vp) == 0)
438 break;
439 }
440 /*
441 * Unless this is a bad time of the month, at most
442 * the first NCPUS items on the free list are
443 * locked, so this is close enough to being empty.
444 */
445 if (vp == NULL) {
446 splx(s);
447 tablefull("vnode");
448 *vpp = NULL;
449 return (ENFILE);
450 }
451
452 #ifdef DIAGNOSTIC
453 if (vp->v_usecount) {
454 vprint("free vnode", vp);
455 panic("free vnode isn't");
456 }
457 #endif
458
459 TAILQ_REMOVE(listhd, vp, v_freelist);
460 vp->v_bioflag &= ~VBIOONFREELIST;
461 splx(s);
462
463 if (vp->v_type != VBAD)
464 vgonel(vp, p);
465 #ifdef DIAGNOSTIC
466 if (vp->v_data) {
467 vprint("cleaned vnode", vp);
468 panic("cleaned vnode isn't");
469 }
470 s = splbio();
471 if (vp->v_numoutput)
472 panic("Clean vnode has pending I/O's");
473 splx(s);
474 #endif
475 vp->v_flag = 0;
476 vp->v_socket = NULL;
477 }
478 cache_purge(vp);
479 vp->v_type = VNON;
480 vp->v_tag = tag;
481 vp->v_op = vops;
482 insmntque(vp, mp);
483 *vpp = vp;
484 vp->v_usecount = 1;
485 vp->v_data = NULL;
486 return (0);
487 }
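/*
 * Illustrative sketch (not part of this file): a filesystem creating an
 * in-core inode typically wraps getnewvnode() like this; the VT_EXAMPLE
 * tag, example_vops and ip names are hypothetical placeholders.
 *
 *	error = getnewvnode(VT_EXAMPLE, mp, &example_vops, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 *	ip->i_vnode = vp;
 *	vp->v_type = VREG;
 *
 * The fs-private state hangs off v_data and the vnode type is set from
 * the on-disk mode before the vnode is used.
 */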
488
489 /*
490 * Move a vnode from one mount queue to another.
491 */
492 void
493 insmntque(struct vnode *vp, struct mount *mp)
494 {
495 /*
496 * Delete from old mount point vnode list, if on one.
497 */
498 if (vp->v_mount != NULL)
499 TAILQ_REMOVE(&vp->v_mount->mnt_vnodelist, vp, v_mntvnodes);
500 /*
501 * Insert into list of vnodes for the new mount point, if available.
502 */
503 if ((vp->v_mount = mp) != NULL)
504 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
505 }
506
507 /*
508 * Create a vnode for a block device.
509 * Used for root filesystem, argdev, and swap areas.
510 * Also used for memory file system special devices.
511 */
512 int
513 bdevvp(dev_t dev, struct vnode **vpp)
514 {
515 return (getdevvp(dev, vpp, VBLK));
516 }
517
518 /*
519 * Create a vnode for a character device.
520 * Used for console handling.
521 */
522 int
523 cdevvp(dev_t dev, struct vnode **vpp)
524 {
525 return (getdevvp(dev, vpp, VCHR));
526 }
527
528 /*
529 * Create a vnode for a device.
530 * Used by bdevvp (block device) for root file system etc.,
531 * and by cdevvp (character device) for console.
532 */
533 int
534 getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
535 {
536 struct vnode *vp;
537 struct vnode *nvp;
538 int error;
539
540 if (dev == NODEV) {
541 *vpp = NULLVP;
542 return (0);
543 }
544 error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
545 if (error) {
546 *vpp = NULLVP;
547 return (error);
548 }
549 vp = nvp;
550 vp->v_type = type;
551 if ((nvp = checkalias(vp, dev, NULL)) != NULL) {
552 vput(vp);
553 vp = nvp;
554 }
555 if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
556 vp->v_flag |= VISTTY;
557 *vpp = vp;
558 return (0);
559 }
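/*
 * Illustrative sketch (not part of this file): early boot code that needs
 * a vnode for the root disk before any filesystem is mounted would do
 * something like
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot obtain root vnode");
 *
 * and later hand rootvp to the filesystem's mountroot routine.
 */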
560
561 /*
562 * Check to see if the new vnode represents a special device
563 * for which we already have a vnode (either because of
564 * bdevvp() or because of a different vnode representing
565 * the same block device). If such an alias exists, deallocate
566 * the existing contents and return the aliased vnode. The
567 * caller is responsible for filling it with its new contents.
568 */
569 struct vnode *
570 checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
571 {
572 struct proc *p = curproc;
573 struct vnode *vp;
574 struct vnodechain *vchain;
575
576 if (nvp->v_type != VBLK && nvp->v_type != VCHR)
577 return (NULLVP);
578
579 vchain = &speclisth[SPECHASH(nvp_rdev)];
580 loop:
581 SLIST_FOREACH(vp, vchain, v_specnext) {
582 if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
583 continue;
584 }
585 /*
586 * Alias, but not in use, so flush it out.
587 */
588 if (vp->v_usecount == 0) {
589 vgonel(vp, p);
590 goto loop;
591 }
592 if (vget(vp, LK_EXCLUSIVE)) {
593 goto loop;
594 }
595 break;
596 }
597
598 /*
599 * Common case is actually in the if statement
600 */
601 if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
602 nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
603 M_WAITOK);
604 nvp->v_rdev = nvp_rdev;
605 nvp->v_hashchain = vchain;
606 nvp->v_specmountpoint = NULL;
607 nvp->v_speclockf = NULL;
608 nvp->v_specbitmap = NULL;
609 if (nvp->v_type == VCHR &&
610 (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
611 (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
612 if (vp != NULLVP)
613 nvp->v_specbitmap = vp->v_specbitmap;
614 else
615 nvp->v_specbitmap = malloc(CLONE_MAPSZ,
616 M_VNODE, M_WAITOK | M_ZERO);
617 }
618 SLIST_INSERT_HEAD(vchain, nvp, v_specnext);
619 if (vp != NULLVP) {
620 nvp->v_flag |= VALIASED;
621 vp->v_flag |= VALIASED;
622 vput(vp);
623 }
624 return (NULLVP);
625 }
626
627 /*
628 * This code is the uncommon case. It is called in case
629 * we found an alias that was VT_NON && vtype of VBLK
630 * This means we found a block device that was created
631 * using bdevvp.
632 * An example of such a vnode is the root partition device vnode
633 * created in ffs_mountroot.
634 *
635 * The vnodes created by bdevvp should not be aliased (why?).
636 */
637
638 VOP_UNLOCK(vp);
639 vclean(vp, 0, p);
640 vp->v_op = nvp->v_op;
641 vp->v_tag = nvp->v_tag;
642 nvp->v_type = VNON;
643 insmntque(vp, mp);
644 return (vp);
645 }
646
647 /*
648 * Grab a particular vnode from the free list, increment its
649 * reference count and lock it. If the vnode lock bit is set,
650 * the vnode is being eliminated in vgone. In that case, we
651 * cannot grab it, so the process is awakened when the
652 * transition is completed, and an error code is returned to
653 * indicate that the vnode is no longer usable, possibly
654 * having been changed to a new file system type.
655 */
656 int
657 vget(struct vnode *vp, int flags)
658 {
659 int error, s, onfreelist;
660
661 /*
662 * If the vnode is in the process of being cleaned out for
663 * another use, we wait for the cleaning to finish and then
664 * return failure. Cleaning is determined by checking that
665 * the VXLOCK flag is set.
666 */
667 mtx_enter(&vnode_mtx);
668 if (vp->v_lflag & VXLOCK) {
669 if (flags & LK_NOWAIT) {
670 mtx_leave(&vnode_mtx);
671 return (EBUSY);
672 }
673
674 vp->v_lflag |= VXWANT;
675 msleep_nsec(vp, &vnode_mtx, PINOD, "vget", INFSLP);
676 mtx_leave(&vnode_mtx);
677 return (ENOENT);
678 }
679 mtx_leave(&vnode_mtx);
680
681 s = splbio();
682 onfreelist = vp->v_bioflag & VBIOONFREELIST;
683 if (vp->v_usecount == 0 && onfreelist) {
684 if (vp->v_holdcnt > 0)
685 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
686 else
687 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
688 vp->v_bioflag &= ~VBIOONFREELIST;
689 }
690 splx(s);
691
692 vp->v_usecount++;
693 if (flags & LK_TYPE_MASK) {
694 if ((error = vn_lock(vp, flags)) != 0) {
695 vp->v_usecount--;
696 if (vp->v_usecount == 0 && onfreelist)
697 vputonfreelist(vp);
698 }
699 return (error);
700 }
701
702 return (0);
703 }
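/*
 * Illustrative sketch (not part of this file): the usual pairing is a
 * locked vget() followed by vput() once the caller is done with the
 * vnode:
 *
 *	if (vget(vp, LK_EXCLUSIVE) != 0)
 *		return (ENOENT);
 *	... use the locked, referenced vnode ...
 *	vput(vp);
 *
 * An unlocked reference taken with vget(vp, 0) is dropped with vrele()
 * instead.
 */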
704
705
706 /* Vnode reference. */
707 void
708 vref(struct vnode *vp)
709 {
710 KERNEL_ASSERT_LOCKED();
711
712 #ifdef DIAGNOSTIC
713 if (vp->v_usecount == 0)
714 panic("vref used where vget required");
715 if (vp->v_type == VNON)
716 panic("vref on a VNON vnode");
717 #endif
718 vp->v_usecount++;
719 }
720
721 void
722 vputonfreelist(struct vnode *vp)
723 {
724 int s;
725 struct freelst *lst;
726
727 s = splbio();
728 #ifdef DIAGNOSTIC
729 if (vp->v_usecount != 0)
730 panic("Use count is not zero!");
731
732 /*
733 * If the hold count is still positive, one or many threads could still
734 * be waiting on the vnode lock inside uvn_io().
735 */
736 if (vp->v_holdcnt == 0 && vp->v_lockcount != 0)
737 panic("%s: lock count is not zero", __func__);
738
739 if (vp->v_bioflag & VBIOONFREELIST) {
740 vprint("vnode already on free list: ", vp);
741 panic("vnode already on free list");
742 }
743 #endif
744
745 vp->v_bioflag |= VBIOONFREELIST;
746 vp->v_bioflag &= ~VBIOERROR;
747
748 if (vp->v_holdcnt > 0)
749 lst = &vnode_hold_list;
750 else
751 lst = &vnode_free_list;
752
753 if (vp->v_type == VBAD)
754 TAILQ_INSERT_HEAD(lst, vp, v_freelist);
755 else
756 TAILQ_INSERT_TAIL(lst, vp, v_freelist);
757
758 splx(s);
759 }
760
761 /*
762 * vput(), just unlock and vrele()
763 */
764 void
765 vput(struct vnode *vp)
766 {
767 struct proc *p = curproc;
768 int s;
769
770 #ifdef DIAGNOSTIC
771 if (vp == NULL)
772 panic("vput: null vp");
773 #endif
774
775 #ifdef DIAGNOSTIC
776 if (vp->v_usecount == 0) {
777 vprint("vput: bad ref count", vp);
778 panic("vput: ref cnt");
779 }
780 #endif
781 vp->v_usecount--;
782 KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
783 if (vp->v_usecount > 0) {
784 VOP_UNLOCK(vp);
785 return;
786 }
787
788 #ifdef DIAGNOSTIC
789 if (vp->v_writecount != 0) {
790 vprint("vput: bad writecount", vp);
791 panic("vput: v_writecount != 0");
792 }
793 #endif
794
795 VOP_INACTIVE(vp, p);
796
797 s = splbio();
798 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
799 vputonfreelist(vp);
800 splx(s);
801 }
802
803 /*
804 * Vnode release - use for active VNODES.
805 * If count drops to zero, call inactive routine and return to freelist.
806 * Returns 0 if it did not sleep.
807 */
808 int
809 vrele(struct vnode *vp)
810 {
811 struct proc *p = curproc;
812 int s;
813
814 #ifdef DIAGNOSTIC
815 if (vp == NULL)
816 panic("vrele: null vp");
817 #endif
818 #ifdef DIAGNOSTIC
819 if (vp->v_usecount == 0) {
820 vprint("vrele: bad ref count", vp);
821 panic("vrele: ref cnt");
822 }
823 #endif
824 vp->v_usecount--;
825 if (vp->v_usecount > 0) {
826 return (0);
827 }
828
829 #ifdef DIAGNOSTIC
830 if (vp->v_writecount != 0) {
831 vprint("vrele: bad writecount", vp);
832 panic("vrele: v_writecount != 0");
833 }
834 #endif
835
836 if (vn_lock(vp, LK_EXCLUSIVE)) {
837 #ifdef DIAGNOSTIC
838 vprint("vrele: cannot lock", vp);
839 #endif
840 return (1);
841 }
842
843 VOP_INACTIVE(vp, p);
844
845 s = splbio();
846 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
847 vputonfreelist(vp);
848 splx(s);
849 return (1);
850 }
851
852 /* Page or buffer structure gets a reference. */
853 void
854 vhold(struct vnode *vp)
855 {
856 int s;
857
858 s = splbio();
859
860 /*
861 * If it is on the freelist and the hold count is currently
862 * zero, move it to the hold list.
863 */
864 if ((vp->v_bioflag & VBIOONFREELIST) &&
865 vp->v_holdcnt == 0 && vp->v_usecount == 0) {
866 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
867 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
868 }
869 vp->v_holdcnt++;
870
871 splx(s);
872 }
873
874 /* Lose interest in a vnode. */
875 void
876 vdrop(struct vnode *vp)
877 {
878 int s;
879
880 s = splbio();
881
882 #ifdef DIAGNOSTIC
883 if (vp->v_holdcnt == 0)
884 panic("vdrop: zero holdcnt");
885 #endif
886
887 vp->v_holdcnt--;
888
889 /*
890 * If it is on the holdlist and the hold count drops to
891 * zero, move it to the free list.
892 */
893 if ((vp->v_bioflag & VBIOONFREELIST) &&
894 vp->v_holdcnt == 0 && vp->v_usecount == 0) {
895 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
896 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
897 }
898
899 splx(s);
900 }
901
902 /*
903 * Remove any vnodes in the vnode table belonging to mount point mp.
904 *
905 * If MNT_NOFORCE is specified, there should not be any active ones,
906 * return error if any are found (nb: this is a user error, not a
907 * system error). If MNT_FORCE is specified, detach any active vnodes
908 * that are found.
909 */
910 #ifdef DEBUG_SYSCTL
911 int busyprt = 0; /* print out busy vnodes */
912 struct ctldebug debug_vfs_busyprt = { "vfs_busyprt", &busyprt };
913 #endif
914
915 int
916 vfs_mount_foreach_vnode(struct mount *mp,
917 int (*func)(struct vnode *, void *), void *arg) {
918 struct vnode *vp, *nvp;
919 int error = 0;
920
921 loop:
922 TAILQ_FOREACH_SAFE(vp , &mp->mnt_vnodelist, v_mntvnodes, nvp) {
923 if (vp->v_mount != mp)
924 goto loop;
925
926 error = func(vp, arg);
927
928 if (error != 0)
929 break;
930 }
931
932 return (error);
933 }
934
935 struct vflush_args {
936 struct vnode *skipvp;
937 int busy;
938 int flags;
939 };
940
941 int
942 vflush_vnode(struct vnode *vp, void *arg)
943 {
944 struct vflush_args *va = arg;
945 struct proc *p = curproc;
946 int empty, s;
947
948 if (vp == va->skipvp) {
949 return (0);
950 }
951
952 if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
953 return (0);
954 }
955
956 /*
957 * If WRITECLOSE is set, only flush out regular file
958 * vnodes open for writing.
959 */
960 if ((va->flags & WRITECLOSE) &&
961 (vp->v_writecount == 0 || vp->v_type != VREG)) {
962 return (0);
963 }
964
965 /*
966 * With v_usecount == 0, all we need to do is clear
967 * out the vnode data structures and we are done.
968 */
969 if (vp->v_usecount == 0) {
970 vgonel(vp, p);
971 return (0);
972 }
973
974 /*
975 * If FORCECLOSE is set, forcibly close the vnode.
976 * For block or character devices, revert to an
977 * anonymous device. For all other files, just kill them.
978 */
979 if (va->flags & FORCECLOSE) {
980 if (vp->v_type != VBLK && vp->v_type != VCHR) {
981 vgonel(vp, p);
982 } else {
983 vclean(vp, 0, p);
984 vp->v_op = &spec_vops;
985 insmntque(vp, NULL);
986 }
987 return (0);
988 }
989
990 /*
991 * If set, this is allowed to ignore vnodes which don't
992 * have changes pending to disk.
993 * XXX Might be nice to check per-fs "inode" flags, but
994 * generally the filesystem is sync'd already, right?
995 */
996 s = splbio();
997 empty = (va->flags & IGNORECLEAN) && LIST_EMPTY(&vp->v_dirtyblkhd);
998 splx(s);
999
1000 if (empty)
1001 return (0);
1002
1003 #if defined(DEBUG_SYSCTL) && (defined(DEBUG) || defined(DIAGNOSTIC))
1004 if (busyprt)
1005 vprint("vflush: busy vnode", vp);
1006 #endif
1007 va->busy++;
1008 return (0);
1009 }
1010
1011 int
1012 vflush(struct mount *mp, struct vnode *skipvp, int flags)
1013 {
1014 struct vflush_args va;
1015 va.skipvp = skipvp;
1016 va.busy = 0;
1017 va.flags = flags;
1018
1019 vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
1020
1021 if (va.busy)
1022 return (EBUSY);
1023 return (0);
1024 }
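/*
 * Illustrative sketch (not part of this file): unmount code typically
 * flushes everything except a vnode it still needs to hold, roughly
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, skipvp, flags);
 *	if (error)
 *		return (error);
 *
 * where skipvp is a still-referenced vnode such as the filesystem's
 * root or device vnode, or NULLVP when nothing needs to be skipped.
 */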
1025
1026 /*
1027 * Disassociate the underlying file system from a vnode.
1028 */
1029 void
1030 vclean(struct vnode *vp, int flags, struct proc *p)
1031 {
1032 int active, do_wakeup = 0;
1033 int s;
1034
1035 /*
1036 * Check to see if the vnode is in use.
1037 * If so we have to reference it before we clean it out
1038 * so that its count cannot fall to zero and generate a
1039 * race against ourselves to recycle it.
1040 */
1041 if ((active = vp->v_usecount) != 0)
1042 vp->v_usecount++;
1043
1044 /*
1045 * Prevent the vnode from being recycled or
1046 * brought into use while we clean it out.
1047 */
1048 mtx_enter(&vnode_mtx);
1049 if (vp->v_lflag & VXLOCK)
1050 panic("vclean: deadlock");
1051 vp->v_lflag |= VXLOCK;
1052
1053 if (vp->v_lockcount > 0) {
1054 /*
1055 * Ensure that any thread currently waiting on the same lock has
1056 * observed that the vnode is about to be exclusively locked
1057 * before continuing.
1058 */
1059 msleep_nsec(&vp->v_lockcount, &vnode_mtx, PINOD, "vop_lock",
1060 INFSLP);
1061 KASSERT(vp->v_lockcount == 0);
1062 }
1063 mtx_leave(&vnode_mtx);
1064
1065 /*
1066 * Even if the count is zero, the VOP_INACTIVE routine may still
1067 * have the object locked while it cleans it out. The VOP_LOCK
1068 * ensures that the VOP_INACTIVE routine is done with its work.
1069 * For active vnodes, it ensures that no other activity can
1070 * occur while the underlying object is being cleaned out.
1071 */
1072 VOP_LOCK(vp, LK_EXCLUSIVE | LK_DRAIN);
1073
1074 /*
1075 * Clean out any VM data associated with the vnode.
1076 */
1077 uvm_vnp_terminate(vp);
1078 /*
1079 * Clean out any buffers associated with the vnode.
1080 */
1081 if (flags & DOCLOSE)
1082 vinvalbuf(vp, V_SAVE, NOCRED, p, 0, INFSLP);
1083 /*
1084 * If purging an active vnode, it must be closed and
1085 * deactivated before being reclaimed. Note that the
1086 * VOP_INACTIVE will unlock the vnode
1087 */
1088 if (active) {
1089 if (flags & DOCLOSE)
1090 VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1091 VOP_INACTIVE(vp, p);
1092 } else {
1093 /*
1094 * Any other processes trying to obtain this lock must first
1095 * wait for VXLOCK to clear, then call the new lock operation.
1096 */
1097 VOP_UNLOCK(vp);
1098 }
1099
1100 /*
1101 * Reclaim the vnode.
1102 */
1103 if (VOP_RECLAIM(vp, p))
1104 panic("vclean: cannot reclaim");
1105 if (active) {
1106 vp->v_usecount--;
1107 if (vp->v_usecount == 0) {
1108 s = splbio();
1109 if (vp->v_holdcnt > 0)
1110 panic("vclean: not clean");
1111 vputonfreelist(vp);
1112 splx(s);
1113 }
1114 }
1115 cache_purge(vp);
1116
1117 /*
1118 * Done with purge, notify sleepers of the grim news.
1119 */
1120 vp->v_op = &dead_vops;
1121 VN_KNOTE(vp, NOTE_REVOKE);
1122 vp->v_tag = VT_NON;
1123 #ifdef VFSLCKDEBUG
1124 vp->v_flag &= ~VLOCKSWORK;
1125 #endif
1126 mtx_enter(&vnode_mtx);
1127 vp->v_lflag &= ~VXLOCK;
1128 if (vp->v_lflag & VXWANT) {
1129 vp->v_lflag &= ~VXWANT;
1130 do_wakeup = 1;
1131 }
1132 mtx_leave(&vnode_mtx);
1133 if (do_wakeup)
1134 wakeup(vp);
1135 }
1136
1137 /*
1138 * Recycle an unused vnode to the front of the free list.
1139 */
1140 int
1141 vrecycle(struct vnode *vp, struct proc *p)
1142 {
1143 if (vp->v_usecount == 0) {
1144 vgonel(vp, p);
1145 return (1);
1146 }
1147 return (0);
1148 }
1149
1150 /*
1151 * Eliminate all activity associated with a vnode
1152 * in preparation for reuse.
1153 */
1154 void
1155 vgone(struct vnode *vp)
1156 {
1157 struct proc *p = curproc;
1158 vgonel(vp, p);
1159 }
1160
1161 /*
1162 * vgone, with struct proc.
1163 */
1164 void
1165 vgonel(struct vnode *vp, struct proc *p)
1166 {
1167 struct vnode *vq;
1168 struct vnode *vx;
1169 int s;
1170
1171 KASSERT(vp->v_uvcount == 0);
1172
1173 /*
1174 * If a vgone (or vclean) is already in progress,
1175 * wait until it is done and return.
1176 */
1177 mtx_enter(&vnode_mtx);
1178 if (vp->v_lflag & VXLOCK) {
1179 vp->v_lflag |= VXWANT;
1180 msleep_nsec(vp, &vnode_mtx, PINOD, "vgone", INFSLP);
1181 mtx_leave(&vnode_mtx);
1182 return;
1183 }
1184 mtx_leave(&vnode_mtx);
1185
1186 /*
1187 * Clean out the filesystem specific data.
1188 */
1189 vclean(vp, DOCLOSE, p);
1190 /*
1191 * Delete from old mount point vnode list, if on one.
1192 */
1193 if (vp->v_mount != NULL)
1194 insmntque(vp, NULL);
1195 /*
1196 * If special device, remove it from special device alias list
1197 * if it is on one.
1198 */
1199 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
1200 vp->v_specinfo != NULL) {
1201 if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1202 (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1203 (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1204 free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1205 }
1206 SLIST_REMOVE(vp->v_hashchain, vp, vnode, v_specnext);
1207 if (vp->v_flag & VALIASED) {
1208 vx = NULL;
1209 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
1210 if (vq->v_rdev != vp->v_rdev ||
1211 vq->v_type != vp->v_type)
1212 continue;
1213 if (vx)
1214 break;
1215 vx = vq;
1216 }
1217 if (vx == NULL)
1218 panic("missing alias");
1219 if (vq == NULL)
1220 vx->v_flag &= ~VALIASED;
1221 vp->v_flag &= ~VALIASED;
1222 }
1223 lf_purgelocks(&vp->v_speclockf);
1224 free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1225 vp->v_specinfo = NULL;
1226 }
1227 /*
1228 * If it is on the freelist and not already at the head,
1229 * move it to the head of the list.
1230 */
1231 vp->v_type = VBAD;
1232
1233 /*
1234 * Move onto the free list, unless we were called from
1235 * getnewvnode and we're not on any free list
1236 */
1237 s = splbio();
1238 if (vp->v_usecount == 0 &&
1239 (vp->v_bioflag & VBIOONFREELIST)) {
1240 if (vp->v_holdcnt > 0)
1241 panic("vgonel: not clean");
1242
1243 if (TAILQ_FIRST(&vnode_free_list) != vp) {
1244 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1245 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1246 }
1247 }
1248 splx(s);
1249 }
1250
1251 /*
1252 * Lookup a vnode by device number.
1253 */
1254 int
1255 vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1256 {
1257 struct vnode *vp;
1258 int rc =0;
1259
1260 SLIST_FOREACH(vp, &speclisth[SPECHASH(dev)], v_specnext) {
1261 if (dev != vp->v_rdev || type != vp->v_type)
1262 continue;
1263 *vpp = vp;
1264 rc = 1;
1265 break;
1266 }
1267 return (rc);
1268 }
1269
1270 /*
1271 * Revoke all the vnodes corresponding to the specified minor number
1272 * range (endpoints inclusive) of the specified major.
1273 */
1274 void
1275 vdevgone(int maj, int minl, int minh, enum vtype type)
1276 {
1277 struct vnode *vp;
1278 int mn;
1279
1280 for (mn = minl; mn <= minh; mn++)
1281 if (vfinddev(makedev(maj, mn), type, &vp))
1282 VOP_REVOKE(vp, REVOKEALL);
1283 }
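/*
 * Illustrative sketch (not part of this file): a driver's detach routine
 * commonly revokes any open device vnodes for its unit before the softc
 * goes away, e.g.
 *
 *	vdevgone(cmaj, unit, unit, VCHR);
 *	vdevgone(bmaj, unit, unit, VBLK);
 *
 * where cmaj/bmaj/unit are placeholders for the driver's character and
 * block majors and its unit number.
 */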
1284
1285 /*
1286 * Calculate the total number of references to a special device.
1287 */
1288 int
1289 vcount(struct vnode *vp)
1290 {
1291 struct vnode *vq;
1292 int count;
1293
1294 loop:
1295 if ((vp->v_flag & VALIASED) == 0)
1296 return (vp->v_usecount);
1297 count = 0;
1298 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
1299 if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1300 continue;
1301 /*
1302 * Alias, but not in use, so flush it out.
1303 */
1304 if (vq->v_usecount == 0 && vq != vp) {
1305 vgone(vq);
1306 goto loop;
1307 }
1308 count += vq->v_usecount;
1309 }
1310 return (count);
1311 }
1312
1313 #if defined(DEBUG) || defined(DIAGNOSTIC)
1314 /*
1315 * Print out a description of a vnode.
1316 */
1317 static char *typename[] =
1318 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1319
1320 void
1321 vprint(char *label, struct vnode *vp)
1322 {
1323 char buf[64];
1324
1325 if (label != NULL)
1326 printf("%s: ", label);
1327 printf("%p, type %s, use %u, write %u, hold %u,",
1328 vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1329 vp->v_holdcnt);
1330 buf[0] = '\0';
1331 if (vp->v_flag & VROOT)
1332 strlcat(buf, "|VROOT", sizeof buf);
1333 if (vp->v_flag & VTEXT)
1334 strlcat(buf, "|VTEXT", sizeof buf);
1335 if (vp->v_flag & VSYSTEM)
1336 strlcat(buf, "|VSYSTEM", sizeof buf);
1337 if (vp->v_lflag & VXLOCK)
1338 strlcat(buf, "|VXLOCK", sizeof buf);
1339 if (vp->v_lflag & VXWANT)
1340 strlcat(buf, "|VXWANT", sizeof buf);
1341 if (vp->v_bioflag & VBIOWAIT)
1342 strlcat(buf, "|VBIOWAIT", sizeof buf);
1343 if (vp->v_bioflag & VBIOONFREELIST)
1344 strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1345 if (vp->v_bioflag & VBIOONSYNCLIST)
1346 strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1347 if (vp->v_flag & VALIASED)
1348 strlcat(buf, "|VALIASED", sizeof buf);
1349 if (buf[0] != '\0')
1350 printf(" flags (%s)", &buf[1]);
1351 if (vp->v_data == NULL) {
1352 printf("\n");
1353 } else {
1354 printf("\n\t");
1355 VOP_PRINT(vp);
1356 }
1357 }
1358 #endif /* DEBUG || DIAGNOSTIC */
1359
1360 #ifdef DEBUG
1361 /*
1362 * List all of the locked vnodes in the system.
1363 * Called when debugging the kernel.
1364 */
1365 void
1366 printlockedvnodes(void)
1367 {
1368 struct mount *mp;
1369 struct vnode *vp;
1370
1371 printf("Locked vnodes\n");
1372
1373 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1374 if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1375 continue;
1376 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1377 if (VOP_ISLOCKED(vp))
1378 vprint(NULL, vp);
1379 }
1380 vfs_unbusy(mp);
1381 }
1382
1383 }
1384 #endif
1385
1386 /*
1387 * Top level filesystem related information gathering.
1388 */
1389 int
1390 vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1391 size_t newlen, struct proc *p)
1392 {
1393 struct vfsconf *vfsp, *tmpvfsp;
1394 int ret;
1395
1396 /* all sysctl names at this level are at least name and field */
1397 if (namelen < 2)
1398 return (ENOTDIR); /* overloaded */
1399
1400 if (name[0] != VFS_GENERIC) {
1401 vfsp = vfs_bytypenum(name[0]);
1402 if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1403 return (EOPNOTSUPP);
1404
1405 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1406 oldp, oldlenp, newp, newlen, p));
1407 }
1408
1409 switch (name[1]) {
1410 case VFS_MAXTYPENUM:
1411 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1412
1413 case VFS_CONF:
1414 if (namelen < 3)
1415 return (ENOTDIR); /* overloaded */
1416
1417 vfsp = vfs_bytypenum(name[2]);
1418 if (vfsp == NULL)
1419 return (EOPNOTSUPP);
1420
1421 /* Make a copy, clear out kernel pointers */
1422 tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1423 memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1424 tmpvfsp->vfc_vfsops = NULL;
1425
1426 ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1427 sizeof(struct vfsconf));
1428
1429 free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1430 return (ret);
1431 case VFS_BCACHESTAT: /* buffer cache statistics */
1432 ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1433 sizeof(struct bcachestats));
1434 return(ret);
1435 }
1436 return (EOPNOTSUPP);
1437 }
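/*
 * Illustrative sketch (not part of this file): from userland the buffer
 * cache statistics exported above can be read through the generic VFS
 * MIB, e.g.
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT };
 *	struct bcachestats bcs;
 *	size_t len = sizeof(bcs);
 *
 *	if (sysctl(mib, 3, &bcs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */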
1438
1439 /*
1440 * Check to see if a filesystem is mounted on a block device.
1441 */
1442 int
1443 vfs_mountedon(struct vnode *vp)
1444 {
1445 struct vnode *vq;
1446 int error = 0;
1447
1448 if (vp->v_specmountpoint != NULL)
1449 return (EBUSY);
1450 if (vp->v_flag & VALIASED) {
1451 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
1452 if (vq->v_rdev != vp->v_rdev ||
1453 vq->v_type != vp->v_type)
1454 continue;
1455 if (vq->v_specmountpoint != NULL) {
1456 error = EBUSY;
1457 break;
1458 }
1459 }
1460 }
1461 return (error);
1462 }
1463
1464 #ifdef NFSSERVER
1465 /*
1466 * Build hash lists of net addresses and hang them off the mount point.
1467 * Called by vfs_export() to set up the lists of export addresses.
1468 */
1469 int
1470 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1471 struct export_args *argp)
1472 {
1473 struct netcred *np;
1474 struct radix_node_head *rnh;
1475 int nplen, i;
1476 struct radix_node *rn;
1477 struct sockaddr *saddr, *smask = NULL;
1478 int error;
1479
1480 if (argp->ex_addrlen == 0) {
1481 if (mp->mnt_flag & MNT_DEFEXPORTED)
1482 return (EPERM);
1483 np = &nep->ne_defexported;
1484 /* fill in the kernel's ucred from userspace's xucred */
1485 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1486 return (error);
1487 mp->mnt_flag |= MNT_DEFEXPORTED;
1488 goto finish;
1489 }
1490 if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1491 argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1492 return (EINVAL);
1493 nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1494 np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1495 np->netc_len = nplen;
1496 saddr = (struct sockaddr *)(np + 1);
1497 error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1498 if (error)
1499 goto out;
1500 if (saddr->sa_len > argp->ex_addrlen)
1501 saddr->sa_len = argp->ex_addrlen;
1502 if (argp->ex_masklen) {
1503 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1504 error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1505 if (error)
1506 goto out;
1507 if (smask->sa_len > argp->ex_masklen)
1508 smask->sa_len = argp->ex_masklen;
1509 }
1510 /* fill in the kernel's ucred from userspace's xucred */
1511 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1512 goto out;
1513 i = saddr->sa_family;
1514 switch (i) {
1515 case AF_INET:
1516 if ((rnh = nep->ne_rtable_inet) == NULL) {
1517 if (!rn_inithead((void **)&nep->ne_rtable_inet,
1518 offsetof(struct sockaddr_in, sin_addr))) {
1519 error = ENOBUFS;
1520 goto out;
1521 }
1522 rnh = nep->ne_rtable_inet;
1523 }
1524 break;
1525 default:
1526 error = EINVAL;
1527 goto out;
1528 }
1529 rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1530 if (rn == NULL || np != (struct netcred *)rn) { /* already exists */
1531 error = EPERM;
1532 goto out;
1533 }
1534 finish:
1535 np->netc_exflags = argp->ex_flags;
1536 return (0);
1537 out:
1538 free(np, M_NETADDR, np->netc_len);
1539 return (error);
1540 }
1541
1542 int
1543 vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1544 {
1545 struct radix_node_head *rnh = (struct radix_node_head *)w;
1546 struct netcred * np = (struct netcred *)rn;
1547
1548 rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1549 free(np, M_NETADDR, np->netc_len);
1550 return (0);
1551 }
1552
1553 /*
1554 * Free the net address hash lists that are hanging off the mount points.
1555 */
1556 void
1557 vfs_free_addrlist(struct netexport *nep)
1558 {
1559 struct radix_node_head *rnh;
1560
1561 if ((rnh = nep->ne_rtable_inet) != NULL) {
1562 rn_walktree(rnh, vfs_free_netcred, rnh);
1563 free(rnh, M_RTABLE, sizeof(*rnh));
1564 nep->ne_rtable_inet = NULL;
1565 }
1566 }
1567 #endif /* NFSSERVER */
1568
1569 int
1570 vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1571 {
1572 #ifdef NFSSERVER
1573 int error;
1574
1575 if (argp->ex_flags & MNT_DELEXPORT) {
1576 vfs_free_addrlist(nep);
1577 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1578 }
1579 if (argp->ex_flags & MNT_EXPORTED) {
1580 if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1581 return (error);
1582 mp->mnt_flag |= MNT_EXPORTED;
1583 }
1584 return (0);
1585 #else
1586 return (ENOTSUP);
1587 #endif /* NFSSERVER */
1588 }
1589
1590 struct netcred *
1591 vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1592 {
1593 #ifdef NFSSERVER
1594 struct netcred *np;
1595 struct radix_node_head *rnh;
1596 struct sockaddr *saddr;
1597
1598 np = NULL;
1599 if (mp->mnt_flag & MNT_EXPORTED) {
1600 /*
1601 * Lookup in the export list first.
1602 */
1603 if (nam != NULL) {
1604 saddr = mtod(nam, struct sockaddr *);
1605 switch(saddr->sa_family) {
1606 case AF_INET:
1607 rnh = nep->ne_rtable_inet;
1608 break;
1609 default:
1610 rnh = NULL;
1611 break;
1612 }
1613 if (rnh != NULL)
1614 np = (struct netcred *)rn_match(saddr, rnh);
1615 }
1616 /*
1617 * If no address match, use the default if it exists.
1618 */
1619 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1620 np = &nep->ne_defexported;
1621 }
1622 return (np);
1623 #else
1624 return (NULL);
1625 #endif /* NFSSERVER */
1626 }
1627
1628 /*
1629 * Do the usual access checking.
1630 * file_mode, uid and gid are from the vnode in question,
1631 * while acc_mode and cred are from the VOP_ACCESS parameter list
1632 */
1633 int
1634 vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1635 mode_t acc_mode, struct ucred *cred)
1636 {
1637 mode_t mask;
1638
1639 /* User id 0 always gets read/write access. */
1640 if (cred->cr_uid == 0) {
1641 /* For VEXEC, at least one of the execute bits must be set. */
1642 if ((acc_mode & VEXEC) && type != VDIR &&
1643 (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1644 return EACCES;
1645 return 0;
1646 }
1647
1648 mask = 0;
1649
1650 /* Otherwise, check the owner. */
1651 if (cred->cr_uid == uid) {
1652 if (acc_mode & VEXEC)
1653 mask |= S_IXUSR;
1654 if (acc_mode & VREAD)
1655 mask |= S_IRUSR;
1656 if (acc_mode & VWRITE)
1657 mask |= S_IWUSR;
1658 return (file_mode & mask) == mask ? 0 : EACCES;
1659 }
1660
1661 /* Otherwise, check the groups. */
1662 if (groupmember(gid, cred)) {
1663 if (acc_mode & VEXEC)
1664 mask |= S_IXGRP;
1665 if (acc_mode & VREAD)
1666 mask |= S_IRGRP;
1667 if (acc_mode & VWRITE)
1668 mask |= S_IWGRP;
1669 return (file_mode & mask) == mask ? 0 : EACCES;
1670 }
1671
1672 /* Otherwise, check everyone else. */
1673 if (acc_mode & VEXEC)
1674 mask |= S_IXOTH;
1675 if (acc_mode & VREAD)
1676 mask |= S_IROTH;
1677 if (acc_mode & VWRITE)
1678 mask |= S_IWOTH;
1679 return (file_mode & mask) == mask ? 0 : EACCES;
1680 }
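/*
 * Worked example (illustrative): for a regular file with mode 0640 owned
 * by uid 100 / gid 10, a caller with cr_uid == 100 asking for VREAD|VWRITE
 * builds mask = S_IRUSR|S_IWUSR = 0600; 0640 & 0600 == 0600, so access is
 * granted.  A group member asking for VWRITE builds mask = S_IWGRP = 0020;
 * 0640 & 0020 == 0, so EACCES is returned.
 */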
1681
1682 int
1683 vnoperm(struct vnode *vp)
1684 {
1685 if (vp->v_flag & VROOT || vp->v_mount == NULL)
1686 return 0;
1687
1688 return (vp->v_mount->mnt_flag & MNT_NOPERM);
1689 }
1690
1691 struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1692 unsigned int vfs_stalling = 0;
1693
1694 int
1695 vfs_stall(struct proc *p, int stall)
1696 {
1697 struct mount *mp;
1698 int allerror = 0, error;
1699
1700 if (stall) {
1701 atomic_inc_int(&vfs_stalling);
1702 rw_enter_write(&vfs_stall_lock);
1703 }
1704
1705 /*
1706 * The loop variable mp is protected by vfs_busy() so that it cannot
1707 * be unmounted while VFS_SYNC() sleeps. Traverse forward to keep the
1708 * lock order consistent with dounmount().
1709 */
1710 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1711 if (stall) {
1712 error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
1713 if (error) {
1714 printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1715 allerror = error;
1716 continue;
1717 }
1718 uvm_vnp_sync(mp);
1719 error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1720 if (error) {
1721 printf("%s: failed to sync\n",
1722 mp->mnt_stat.f_mntonname);
1723 vfs_unbusy(mp);
1724 allerror = error;
1725 continue;
1726 }
1727 mp->mnt_flag |= MNT_STALLED;
1728 } else {
1729 if (mp->mnt_flag & MNT_STALLED) {
1730 vfs_unbusy(mp);
1731 mp->mnt_flag &= ~MNT_STALLED;
1732 }
1733 }
1734 }
1735
1736 if (!stall) {
1737 rw_exit_write(&vfs_stall_lock);
1738 atomic_dec_int(&vfs_stalling);
1739 }
1740
1741 return (allerror);
1742 }
1743
1744 void
1745 vfs_stall_barrier(void)
1746 {
1747 if (__predict_false(vfs_stalling)) {
1748 rw_enter_read(&vfs_stall_lock);
1749 rw_exit_read(&vfs_stall_lock);
1750 }
1751 }
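/*
 * Illustrative sketch (not part of this file): a code path that must not
 * run while filesystems are stalled simply calls vfs_stall_barrier()
 * before starting its work, e.g.
 *
 *	vfs_stall_barrier();
 *	... issue filesystem I/O ...
 *
 * If a stall is in progress, the caller parks on vfs_stall_lock until
 * vfs_stall(p, 0) releases it; otherwise the check is a cheap read of
 * vfs_stalling.
 */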
1752
1753 /*
1754 * Unmount all file systems.
1755 * We traverse the list in reverse order under the assumption that doing so
1756 * will avoid needing to worry about dependencies.
1757 */
1758 void
1759 vfs_unmountall(void)
1760 {
1761 struct mount *mp, *nmp;
1762 int allerror, error, again = 1;
1763
1764 retry:
1765 allerror = 0;
1766 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1767 if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1768 continue;
1769 /* XXX Here is a race, the next pointer is not locked. */
1770 if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1771 printf("unmount of %s failed with error %d\n",
1772 mp->mnt_stat.f_mntonname, error);
1773 allerror = 1;
1774 }
1775 }
1776
1777 if (allerror) {
1778 printf("WARNING: some file systems would not unmount\n");
1779 if (again) {
1780 printf("retrying\n");
1781 again = 0;
1782 goto retry;
1783 }
1784 }
1785 }
1786
1787 /*
1788 * Sync and unmount file systems before shutting down.
1789 */
1790 void
1791 vfs_shutdown(struct proc *p)
1792 {
1793 #ifdef ACCOUNTING
1794 acct_shutdown();
1795 #endif
1796
1797 printf("syncing disks...");
1798
1799 if (panicstr == NULL) {
1800 /* Sync before unmount, in case we hang on something. */
1801 sys_sync(p, NULL, NULL);
1802 vfs_unmountall();
1803 }
1804
1805 #if NSOFTRAID > 0
1806 sr_quiesce();
1807 #endif
1808
1809 if (vfs_syncwait(p, 1))
1810 printf(" giving up\n");
1811 else
1812 printf(" done\n");
1813 }
1814
1815 /*
1816 * perform sync() operation and wait for buffers to flush.
1817 */
1818 int
1819 vfs_syncwait(struct proc *p, int verbose)
1820 {
1821 struct buf *bp;
1822 int iter, nbusy, dcount, s;
1823 #ifdef MULTIPROCESSOR
1824 int hold_count;
1825 #endif
1826
1827 sys_sync(p, NULL, NULL);
1828
1829 /* Wait for sync to finish. */
1830 dcount = 10000;
1831 for (iter = 0; iter < 20; iter++) {
1832 nbusy = 0;
1833 LIST_FOREACH(bp, &bufhead, b_list) {
1834 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1835 nbusy++;
1836 /*
1837 * With soft updates, some buffers that are
1838 * written will be remarked as dirty until other
1839 * buffers are written.
1840 *
1841 * XXX here be dragons. this should really go away
1842 * but should be carefully made to go away on its
1843 * own with testing.. XXX
1844 */
1845 if (bp->b_flags & B_DELWRI) {
1846 s = splbio();
1847 bremfree(bp);
1848 buf_acquire(bp);
1849 splx(s);
1850 nbusy++;
1851 bawrite(bp);
1852 if (dcount-- <= 0) {
1853 if (verbose)
1854 printf("softdep ");
1855 return 1;
1856 }
1857 }
1858 }
1859 if (nbusy == 0)
1860 break;
1861 if (verbose)
1862 printf("%d ", nbusy);
1863 #ifdef MULTIPROCESSOR
1864 if (_kernel_lock_held())
1865 hold_count = __mp_release_all(&kernel_lock);
1866 else
1867 hold_count = 0;
1868 #endif
1869 DELAY(40000 * iter);
1870 #ifdef MULTIPROCESSOR
1871 if (hold_count)
1872 __mp_acquire_count(&kernel_lock, hold_count);
1873 #endif
1874 }
1875
1876 return nbusy;
1877 }
1878
1879 /*
1880 * posix file system related system variables.
1881 */
1882 int
1883 fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1884 void *newp, size_t newlen, struct proc *p)
1885 {
1886 /* all sysctl names at this level are terminal */
1887 if (namelen != 1)
1888 return (ENOTDIR);
1889
1890 switch (name[0]) {
1891 case FS_POSIX_SETUID:
1892 return (sysctl_securelevel_int(oldp, oldlenp, newp, newlen,
1893 &suid_clear));
1894 default:
1895 return (EOPNOTSUPP);
1896 }
1897 /* NOTREACHED */
1898 }
1899
1900 /*
1901 * file system related system variables.
1902 */
1903 int
1904 fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1905 size_t newlen, struct proc *p)
1906 {
1907 sysctlfn *fn;
1908
1909 switch (name[0]) {
1910 case FS_POSIX:
1911 fn = fs_posix_sysctl;
1912 break;
1913 default:
1914 return (EOPNOTSUPP);
1915 }
1916 return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1917 }
1918
1919
1920 /*
1921 * Routines dealing with vnodes and buffers
1922 */
1923
1924 /*
1925 * Wait for all outstanding I/Os to complete
1926 *
1927 * Manipulates v_numoutput. Must be called at splbio()
1928 */
1929 int
1930 vwaitforio(struct vnode *vp, int slpflag, char *wmesg, uint64_t timeo)
1931 {
1932 int error = 0;
1933
1934 splassert(IPL_BIO);
1935
1936 while (vp->v_numoutput) {
1937 vp->v_bioflag |= VBIOWAIT;
1938 error = tsleep_nsec(&vp->v_numoutput,
1939 slpflag | (PRIBIO + 1), wmesg, timeo);
1940 if (error)
1941 break;
1942 }
1943
1944 return (error);
1945 }
1946
1947 /*
1948 * Update outstanding I/O count and do wakeup if requested.
1949 *
1950 * Manipulates v_numoutput. Must be called at splbio()
1951 */
1952 void
1953 vwakeup(struct vnode *vp)
1954 {
1955 splassert(IPL_BIO);
1956
1957 if (vp != NULL) {
1958 if (vp->v_numoutput-- == 0)
1959 panic("vwakeup: neg numoutput");
1960 if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1961 vp->v_bioflag &= ~VBIOWAIT;
1962 wakeup(&vp->v_numoutput);
1963 }
1964 }
1965 }
1966
1967 /*
1968 * Flush out and invalidate all buffers associated with a vnode.
1969 * Called with the underlying object locked.
1970 */
1971 int
1972 vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1973 int slpflag, uint64_t slptimeo)
1974 {
1975 struct buf *bp;
1976 struct buf *nbp, *blist;
1977 int s, error;
1978
1979 #ifdef VFSLCKDEBUG
1980 if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1981 panic("%s: vp isn't locked, vp %p", __func__, vp);
1982 #endif
1983
1984 if (flags & V_SAVE) {
1985 s = splbio();
1986 vwaitforio(vp, 0, "vinvalbuf", INFSLP);
1987 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1988 splx(s);
1989 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1990 return (error);
1991 s = splbio();
1992 if (vp->v_numoutput > 0 ||
1993 !LIST_EMPTY(&vp->v_dirtyblkhd))
1994 panic("%s: dirty bufs, vp %p", __func__, vp);
1995 }
1996 splx(s);
1997 }
1998 loop:
1999 s = splbio();
2000 for (;;) {
2001 int count = 0;
2002 if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
2003 (flags & V_SAVEMETA))
2004 while (blist && blist->b_lblkno < 0)
2005 blist = LIST_NEXT(blist, b_vnbufs);
2006 if (blist == NULL &&
2007 (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
2008 (flags & V_SAVEMETA))
2009 while (blist && blist->b_lblkno < 0)
2010 blist = LIST_NEXT(blist, b_vnbufs);
2011 if (!blist)
2012 break;
2013
2014 for (bp = blist; bp; bp = nbp) {
2015 nbp = LIST_NEXT(bp, b_vnbufs);
2016 if (flags & V_SAVEMETA && bp->b_lblkno < 0)
2017 continue;
2018 if (bp->b_flags & B_BUSY) {
2019 bp->b_flags |= B_WANTED;
2020 error = tsleep_nsec(bp, slpflag | (PRIBIO + 1),
2021 "vinvalbuf", slptimeo);
2022 if (error) {
2023 splx(s);
2024 return (error);
2025 }
2026 break;
2027 }
2028 bremfree(bp);
2029 /*
2030 * XXX Since there are no node locks for NFS, I believe
2031 * there is a slight chance that a delayed write will
2032 * occur while sleeping just above, so check for it.
2033 */
2034 if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
2035 buf_acquire(bp);
2036 splx(s);
2037 (void) VOP_BWRITE(bp);
2038 goto loop;
2039 }
2040 buf_acquire_nomap(bp);
2041 bp->b_flags |= B_INVAL;
2042 brelse(bp);
2043 count++;
2044 /*
2045 * XXX Temporary workaround XXX
2046 *
2047 * If this is a gigantisch vnode and we are
2048 * trashing a ton of buffers, drop the lock
2049 * and yield every so often. The longer term
2050 * fix is to add a separate list for these
2051 * invalid buffers so we don't have to do the
2052 * work to free these here.
2053 */
2054 if (count > 100) {
2055 splx(s);
2056 sched_pause(yield);
2057 goto loop;
2058 }
2059 }
2060 }
2061 if (!(flags & V_SAVEMETA) &&
2062 (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
2063 panic("%s: flush failed, vp %p", __func__, vp);
2064 splx(s);
2065 return (0);
2066 }
2067
2068 void
2069 vflushbuf(struct vnode *vp, int sync)
2070 {
2071 struct buf *bp, *nbp;
2072 int s;
2073
2074 loop:
2075 s = splbio();
2076 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2077 if ((bp->b_flags & B_BUSY))
2078 continue;
2079 if ((bp->b_flags & B_DELWRI) == 0)
2080 panic("vflushbuf: not dirty");
2081 bremfree(bp);
2082 buf_acquire(bp);
2083 splx(s);
2084 /*
2085 * Wait for I/O associated with indirect blocks to complete,
2086 * since there is no way to quickly wait for them below.
2087 */
2088 if (bp->b_vp == vp || sync == 0)
2089 (void) bawrite(bp);
2090 else
2091 (void) bwrite(bp);
2092 goto loop;
2093 }
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", INFSLP);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>

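/*
 * ddb(4) helper: print the interesting fields of a struct buf.
 */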
void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)(" vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	    " proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	    " data %p saveaddr %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    bp->b_iodone);

	(*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

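/*
 * ddb(4) helper: print the state of a vnode and, if "full" is set,
 * the buffers on its clean and dirty lists.
 */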
void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
	    vp->v_tag,
	    (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

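/*
 * ddb(4) helper: print the state of a mount point, its cached statfs
 * information, its locked vnodes and, if "full" is set, all of its
 * vnodes.
 */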
void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %u flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\n"
	    "blocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)(" files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)(" f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)(" syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)(" syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)(" fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	cnt = 0;
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (VOP_ISLOCKED(vp)) {
			if (cnt == 0)
				(*pr)("\n %p", vp);
			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
				(*pr)(",\n %p", vp);
			else
				(*pr)(", %p", vp);
			cnt++;
		}
	}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:");
		/* XXX would take mountlist lock, except ddb has no context */
		cnt = 0;
		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (cnt == 0)
				(*pr)("\n %p", vp);
			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
				(*pr)(",\n %p", vp);
			else
				(*pr)(", %p", vp);
			cnt++;
		}
		(*pr)("\n");
	}
}
#endif /* DDB */

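/*
 * Fill in a statfs structure from the mount point's cached statfs
 * information.  The copy is skipped when the destination is the
 * cache itself.
 */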
void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
	    sizeof(union mount_info));
}
