/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $
 * $DragonFly: src/sys/kern/vfs_default.c,v 1.9 2004/03/01 06:33:17 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup (struct vop_lookup_args *);
static int	vop_nostrategy (struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ &vop_getacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setacl_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_aclcheck_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_getextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_setextattr_desc,		(vop_t *) vop_eopnotsupp },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

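/*
 * Trivial VOP implementations: most of these simply return a fixed
 * error code (or success, in the case of vop_null).  vop_defaultop
 * forwards the call to the default table above, and vop_panic panics.
 * Filesystems reference these directly for operations they do not
 * implement.
 */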
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

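/*
 * vop_nolookup:	default lookup, always fails with ENOTDIR.
 */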
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a B_READ strategy call.  B_INVAL
 *	is assumed to already be clear prior to a write and should not be
 *	cleared manually unless you just made the buffer invalid.  B_ERROR
 *	should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}

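/*
 * vop_stdpathconf:	default pathconf.  Reports the system-wide limits
 *			for the standard _PC_* names and returns EINVAL
 *			for anything it does not recognize.
 */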
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e. vp->v_data points to the lock!
 */
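/*
 * For example, a filesystem using these functions might lay out its
 * per-vnode data like this (hypothetical names, not from any particular
 * filesystem):
 *
 *	struct myfs_inode {
 *		struct lock	i_lock;		(must be the first member)
 *		...
 *	};
 *
 * with vp->v_data pointing at the myfs_inode, so the cast
 * (struct lock *)vp->v_data below finds the lock.
 */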
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			lwkt_reltoken(ap->a_vlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, ap->a_vlock, ap->a_td));
#else
	return (debuglockmgr(l, ap->a_flags, ap->a_vlock, ap->a_td,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			lwkt_reltoken(ap->a_vlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, ap->a_vlock, ap->a_td));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

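/*
 * vop_stdbwrite:	default bwrite, simply passes the buffer to bwrite().
 */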
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	struct lock *l = (struct lock *)vp->v_data;
	int vnflags, flags = ap->a_flags;

	if (l == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			lwkt_reltoken(ap->a_vlock);
		return 0;
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(l, vnflags, ap->a_vlock, ap->a_td));
#else
	return (debuglockmgr(l, vnflags, ap->a_vlock, ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, ap->a_vlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		lwkt_reltoken(ap->a_vlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		lwkt_tokref_t a_vlock;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	if (ap->a_flags & LK_INTERLOCK)
		lwkt_reltoken(ap->a_vlock);
	return (0);
}

/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	return (0);
}

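/*
 * vop_stdcreatevobject:	associate a VM object with the vnode so it
 *				can be used for VMIO.  Regular files and
 *				directories get an object sized from the
 *				file attributes; disk devices get the
 *				largest object possible.
 */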
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (dev_dport(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, NULL, 0, td);
			tsleep(object, 0, "vodead", 0);
			vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

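/*
 * vop_stddestroyvobject:	release the vnode's VM object.  The object
 *				is terminated outright if it has no
 *				remaining references, otherwise only the
 *				pager association is dropped.
 */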
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * VFS default ops.
 * Used to fill the VFS function table to get reasonable default return values.
 */
int
vfs_stdmount(struct mount *mp, char *path, caddr_t data,
	struct nameidata *ndp, struct thread *td)
{
	return (0);
}

int
vfs_stdunmount(struct mount *mp, int mntflags, struct thread *td)
{
	return (0);
}

int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart(struct mount *mp, int flags, struct thread *td)
{
	return (0);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid,
	caddr_t arg, struct thread *td)
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor, struct thread *td)
{
	return (0);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
	struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{
	return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{
	return(0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, const char *attrname,
	caddr_t arg, struct thread *td)
{
	return(EOPNOTSUPP);
}

/* end of vfs default ops */