/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

/*
 * The smallest directory entry we will accept: the fixed dirent header
 * (sizeof(struct dirent) minus the full MAXNAMLEN + 1 name buffer) plus
 * four bytes of name storage.
 */
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), and so on.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};
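
/*
 * Example (illustrative sketch only; "myfs" and the myfs_* handlers are
 * hypothetical): a filesystem typically supplies its own vop_vector and
 * points vop_default at default_vnodeops, so any operation it leaves
 * unset falls through to the defaults above.
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *		.vop_reclaim =	myfs_reclaim,
 *	};
 */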

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function used by filesystems to panic on VOPs that must never
 * be reached; calling one indicates a filesystem bug.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for these operations, where it exists, lives in the
 * corresponding VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap)
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for the arguments of vop_rename
 * on behalf of filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

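/*
 * Fetch the next directory entry from the locked directory vnode vp.
 * When *len is zero a fresh block of entries is read into dirbuf with
 * VOP_READDIR(); subsequent calls walk that buffer via *cpos and *len.
 * Returns 0 with *dpp pointing at the entry, ENOENT once the directory
 * is exhausted, or an error.  See dirent_exists() below for the typical
 * calling loop.
 */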
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

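/*
 * vop_stdaccess() and vop_stdaccessx() implement each operation in terms
 * of the other, so a filesystem only has to provide one of the two (see
 * the recursion warning above default_vnodeops).
 */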
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, returning information about
 * filesystem limits.  Filesystems whose limits are smaller must override
 * this per filesystem.
 */
int
vop_stdpathconf(struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap)
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
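
/*
 * Example (illustrative sketch only; "myfs" and MYFS_MAXNAMLEN are
 * hypothetical): a filesystem with a smaller name limit answers the
 * queries it constrains and defers everything else to vop_stdpathconf().
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = MYFS_MAXNAMLEN;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */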

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap)
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return "ready" for the standard events; the default for select/poll on
 * filesystems with no special poll support, via poll_no_poll().
 */
int
vop_nopoll(struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap)
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap)
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap)
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mount
	 * point will be set to NULL.  We only have to prevent this call
	 * from returning with a ref to an incorrect mount point.  It is
	 * not harmful to return with a ref to our previous mount point.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

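/*
 * The ref returned above is what lets callers such as vn_start_write()
 * account a pending write against the right mount point even though the
 * vnode may be reclaimed concurrently.
 */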
/*
 * Default bmap: logical blocks are mnt_stat.f_iosize bytes, so convert
 * the logical block number into DEV_BSIZE units (e.g. with an 8K f_iosize
 * and a 512-byte DEV_BSIZE, logical block 3 maps to 3 * 16 = 48) and
 * report no read-ahead or read-behind runs.
 * XXX Needs a VOP_BMAP(9) manpage.
 */
int
vop_stdbmap(struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap)
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

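/*
 * Translate a vnode into the name of its entry in the parent directory,
 * plus a referenced parent vnode: open the parent via "..", scan it with
 * VOP_READDIR() until an entry with a matching inode number is found,
 * and copy that name to the end of the caller's buffer.  This is the
 * slow path behind reverse lookups such as vn_fullpath() when the name
 * cache cannot supply the answer.
 */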
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * VFS default ops, used to fill the VFS function table with reasonable
 * default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

	return (EOPNOTSUPP);
}

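/*
 * Example (illustrative sketch only; "myfs" and its handlers are
 * hypothetical): a filesystem's vfsops can point the operations it does
 * not care about at the vfs_std* stubs above.
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	myfs_root,
 *		.vfs_statfs =	myfs_statfs,
 *		.vfs_sync =	vfs_stdnosync,
 *		.vfs_vget =	vfs_stdvget,
 *		.vfs_fhtovp =	vfs_stdfhtovp,
 *	};
 */
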
/* end of vfs default ops */