xref: /dragonfly/sys/kern/vfs_default.c (revision 984263bc)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup __P((struct vop_lookup_args *));
static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ &vop_getacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_aclcheck_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_getextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
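
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * hooks into this mechanism by routing the vop_default_desc entry of
 * its own table to vop_defaultop, so any VOP it does not implement
 * falls through to the defaults above.  The "foofs" names here are
 * hypothetical.
 */
#if 0
vop_t **foofs_vnodeop_p;
static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_lookup_desc,		(vop_t *) foofs_lookup },
	{ &vop_open_desc,		(vop_t *) foofs_open },
	{ NULL, NULL }
};
static struct vnodeopv_desc foofs_vnodeop_opv_desc =
	{ &foofs_vnodeop_p, foofs_vnodeop_entries };
VNODEOP_SET(foofs_vnodeop_opv_desc);
#endif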

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

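/*
 * vop_defaultop:
 *
 * The fallback a filesystem names in its own table (see the sketch
 * above) to reach the defaults; VOCALL() re-dispatches the operation
 * through default_vnodeop_p at the descriptor's offset.
 */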
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a B_READ strategy call.
 *	B_INVAL is assumed to already be clear prior to a write and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	B_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}
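
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, a read path clears B_ERROR and B_INVAL before handing the
 * buffer to the strategy routine.
 */
#if 0
	bp->b_flags &= ~(B_ERROR | B_INVAL);	/* must be clear on entry */
	bp->b_flags |= B_READ;
	VOP_STRATEGY(vp, bp);
#endif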

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, ie: vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_p));
}
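
/*
 * Illustrative sketch (hypothetical "foonode"): the casts above only
 * work because the lock is the first field of the per-vnode data, so
 * v_data doubles as a pointer to the lock.
 */
#if 0
struct foonode {
	struct lock	f_lock;	/* must be the first element */
	/* ... filesystem-specific fields ... */
};
/* vp->v_data points at a foonode; vop_stdlock casts it to struct lock *. */
#endif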

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
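
/*
 * Illustrative sketch (not part of the original file): because anything
 * outside POLLSTANDARD answers POLLNVAL here, a caller can probe for
 * extended poll support without filesystem-specific knowledge.  The
 * POLLATTRIB extension bit is assumed from this era's <sys/poll.h>.
 */
#if 0
	if (VOP_POLL(vp, POLLATTRIB, cred, p) == POLLNVAL)
		; /* no extended poll functionality available */
#endif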

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	struct lock *l = (struct lock *)vp->v_data;
	int vnflags, flags = ap->a_flags;

	if (l == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* FALLTHROUGH: exclusive requests are downgraded to shared */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(l, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	return (0);
}

int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct proc *p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
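
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * syncer described above might use this routine, assuming the
 * OBJ_MIGHTBEDIRTY flag of this era's VM system; the quick test is done
 * without the interlock.
 */
#if 0
	vm_object_t obj;

	if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY))
		; /* worth going heavy-weight and flushing pages */
#endif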

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return(0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return(EOPNOTSUPP);
}

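/*
 * Illustrative sketch (hypothetical "foofs"): a filesystem plugs these
 * defaults into whatever struct vfsops slots it does not implement
 * itself.  Field order is assumed from this era's struct vfsops.
 */
#if 0
static struct vfsops foofs_vfsops = {
	foofs_mount,		/* filesystem-specific */
	vfs_stdstart,
	foofs_unmount,		/* filesystem-specific */
	foofs_root,		/* filesystem-specific */
	vfs_stdquotactl,
	foofs_statfs,		/* filesystem-specific */
	vfs_stdsync,
	vfs_stdvget,
	vfs_stdfhtovp,
	vfs_stdcheckexp,
	vfs_stdvptofh,
	vfs_stdinit,
	vfs_stduninit,
	vfs_stdextattrctl,
};
#endif
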
/* end of vfs default ops */