1 /*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed
6 * to Berkeley by John Heidemann of the UCLA Ficus project.
7 *
8 * The statvfs->statfs conversion code was contributed to the DragonFly
9 * Project by Joerg Sonnenberger <joerg@bec.de>.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
36 * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/buf.h>
42 #include <sys/conf.h>
43 #include <sys/fcntl.h>
44 #include <sys/file.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/unistd.h>
50 #include <sys/vnode.h>
51 #include <sys/namei.h>
52 #include <sys/mountctl.h>
53 #include <sys/vfs_quota.h>
54 #include <sys/uio.h>
55
56 #include <machine/limits.h>
57
58 #include <vm/vm.h>
59 #include <vm/vm_object.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_pager.h>
62 #include <vm/vnode_pager.h>
63
64 static int vop_nolookup (struct vop_old_lookup_args *);
65 static int vop_nostrategy (struct vop_strategy_args *);
66
/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */
struct vop_ops default_vnode_vops = {
	.vop_default = vop_eopnotsupp,
	/* Simple error / no-op fallbacks */
	.vop_advlock = (void *)vop_einval,
	.vop_fsync = (void *)vop_null,
	.vop_fdatasync = vop_stdfdatasync,
	.vop_ioctl = (void *)vop_enotty,
	.vop_mmap = (void *)vop_einval,
	.vop_old_lookup = vop_nolookup,
	/* Generic default implementations */
	.vop_open = vop_stdopen,
	.vop_close = vop_stdclose,
	.vop_getattr_lite = vop_stdgetattr_lite,
	.vop_pathconf = vop_stdpathconf,
	.vop_readlink = (void *)vop_einval,
	.vop_reallocblks = (void *)vop_eopnotsupp,
	.vop_strategy = vop_nostrategy,
	/* ACLs and extended attributes are unsupported unless the VFS says so */
	.vop_getacl = (void *)vop_eopnotsupp,
	.vop_setacl = (void *)vop_eopnotsupp,
	.vop_aclcheck = (void *)vop_eopnotsupp,
	.vop_getextattr = (void *)vop_eopnotsupp,
	.vop_setextattr = (void *)vop_eopnotsupp,
	.vop_markatime = vop_stdmarkatime,
	.vop_allocate = vop_stdallocate,
	/*
	 * New-API namespace VOPs are emulated on top of the old
	 * LOOKUP-based VOPs for filesystems not yet converted.
	 */
	.vop_nresolve = vop_compat_nresolve,
	.vop_nlookupdotdot = vop_compat_nlookupdotdot,
	.vop_ncreate = vop_compat_ncreate,
	.vop_nmkdir = vop_compat_nmkdir,
	.vop_nmknod = vop_compat_nmknod,
	.vop_nlink = vop_compat_nlink,
	.vop_nsymlink = vop_compat_nsymlink,
	.vop_nwhiteout = vop_compat_nwhiteout,
	.vop_nremove = vop_compat_nremove,
	.vop_nrmdir = vop_compat_nrmdir,
	.vop_nrename = vop_compat_nrename,
	.vop_mountctl = vop_stdmountctl
};

/* Register this table as the system-wide default vop vector. */
VNODEOP_SET(default_vnode_vops);
110
/*
 * Catch-all handler: report that the operation is not supported
 * by this filesystem.
 */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	return EOPNOTSUPP;
}
116
/*
 * Generic handler: fail the operation with EBADF.
 */
int
vop_ebadf(struct vop_generic_args *ap)
{
	return EBADF;
}
122
/*
 * Generic handler: fail the operation with ENOTTY
 * (e.g. ioctl on a vnode that does not support it).
 */
int
vop_enotty(struct vop_generic_args *ap)
{
	return ENOTTY;
}
128
/*
 * Generic handler: fail the operation with EINVAL.
 */
int
vop_einval(struct vop_generic_args *ap)
{
	return EINVAL;
}
134
/*
 * Default VOP_MARKATIME handler: atime marking is not supported
 * unless the filesystem provides its own implementation.
 */
int
vop_stdmarkatime(struct vop_markatime_args *ap)
{
	return EOPNOTSUPP;
}
140
/*
 * Generic no-op handler: succeed without doing anything.
 */
int
vop_null(struct vop_generic_args *ap)
{
	return 0;
}
146
147 int
vop_defaultop(struct vop_generic_args * ap)148 vop_defaultop(struct vop_generic_args *ap)
149 {
150 return (VOCALL(&default_vnode_vops, ap));
151 }
152
/*
 * vop_compat_nresolve { struct nchandle *a_nch, struct vnode *a_dvp }
 * XXX STOPGAP FUNCTION
 *
 * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
 * WILL BE REMOVED.  This procedure exists for all VFSs which have not
 * yet implemented VOP_NRESOLVE().  It converts VOP_NRESOLVE() into a
 * vop_old_lookup() and does appropriate translations.
 *
 * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * VOP_NRESOLVE() is far less complex then the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach the
 * vnode to the entry.  If the entry represents a non-existent node then
 * cache_setvp() is called with a NULL vnode to resolve the entry into a
 * negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 *
 * The ncp will NEVER represent "", "." or "..", or contain any slashes.
 *
 * There is a potential directory and vnode interlock.   The lock order
 * requirement is: namecache, governing directory, resolved vnode.
 */
int
vop_compat_nresolve(struct vop_nresolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct componentname cnp;

	nch = ap->a_nch;	/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/* Build a traditional LOOKUP componentname from the ncp. */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = 0;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0)
		vn_unlock(vp);
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* was resolved by another process while we were unlocked */
		if (error == 0)
			vrele(vp);
	} else if (error == 0) {
		/* positive hit: attach the vnode to the namecache entry */
		KKASSERT(vp != NULL);
		cache_setvp(nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		/* miss: create a negative cache entry (note whiteouts) */
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(nch, NULL);
	}
	vrele(dvp);
	return (error);
}
238
/*
 * vop_compat_nlookupdotdot { struct vnode *a_dvp,
 *			struct vnode **a_vpp,
 *			struct ucred *a_cred }
 *
 * Lookup the vnode representing the parent directory of the specified
 * directory vnode.  a_dvp should not be locked.  If no error occurs *a_vpp
 * will contained the parent vnode, locked and refd, else *a_vpp will be NULL.
 *
 * This function is designed to aid NFS server-side operations and is
 * used by cache_fromdvp() to create a consistent, connected namecache
 * topology.
 *
 * As part of the NEW API work, VFSs will first split their CNP_ISDOTDOT
 * code out from their *_lookup() and create *_nlookupdotdot().  Then as time
 * permits VFSs will implement the remaining *_n*() calls and finally get
 * rid of their *_lookup() call.
 */
int
vop_compat_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct componentname cnp;
	int error;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 */
	*ap->a_vpp = NULL;
	if ((error = vget(ap->a_dvp, LK_EXCLUSIVE)) != 0)
		return (error);
	/* ".." only makes sense relative to a directory */
	if (ap->a_dvp->v_type != VDIR) {
		vput(ap->a_dvp);
		return (ENOTDIR);
	}

	/* Build a traditional ".." LOOKUP componentname. */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = CNP_ISDOTDOT;
	cnp.cn_nameptr = "..";
	cnp.cn_namelen = 2;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 *
	 * (*vpp) will be returned locked if no error occured, which is the
	 * state we want.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, ap->a_dvp, ap->a_vpp, &cnp);
	if (cnp.cn_flags & CNP_PDIRUNLOCK)
		vrele(ap->a_dvp);
	else
		vput(ap->a_dvp);
	return (error);
}
298
299 /*
300 * vop_compat_ncreate { struct nchandle *a_nch, XXX STOPGAP FUNCTION
301 * struct vnode *a_dvp,
302 * struct vnode **a_vpp,
303 * struct ucred *a_cred,
304 * struct vattr *a_vap }
305 *
306 * Create a file as specified by a_vap. Compatibility requires us to issue
307 * the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_CREATE in order
308 * to setup the directory inode's i_offset and i_count (e.g. in UFS).
309 */
310 int
vop_compat_ncreate(struct vop_ncreate_args * ap)311 vop_compat_ncreate(struct vop_ncreate_args *ap)
312 {
313 struct thread *td = curthread;
314 struct componentname cnp;
315 struct nchandle *nch;
316 struct namecache *ncp;
317 struct vnode *dvp;
318 int error;
319
320 /*
321 * Sanity checks, get a locked directory vnode.
322 */
323 nch = ap->a_nch; /* locked namecache node */
324 dvp = ap->a_dvp;
325 ncp = nch->ncp;
326
327 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
328 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
329 ncp, ncp->nc_name);
330 return(EAGAIN);
331 }
332
333 /*
334 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
335 * caches all information required to create the entry in the
336 * directory inode. We expect a return code of EJUSTRETURN for
337 * the CREATE case. The cnp must simulated a saved-name situation.
338 */
339 bzero(&cnp, sizeof(cnp));
340 cnp.cn_nameiop = NAMEI_CREATE;
341 cnp.cn_flags = CNP_LOCKPARENT;
342 cnp.cn_nameptr = ncp->nc_name;
343 cnp.cn_namelen = ncp->nc_nlen;
344 cnp.cn_cred = ap->a_cred;
345 cnp.cn_td = td;
346 *ap->a_vpp = NULL;
347
348 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);
349
350 /*
351 * EJUSTRETURN should be returned for this case, which means that
352 * the VFS has setup the directory inode for the create. The dvp we
353 * passed in is expected to remain in a locked state.
354 *
355 * If the VOP_OLD_CREATE is successful we are responsible for updating
356 * the cache state of the locked ncp that was passed to us.
357 */
358 if (error == EJUSTRETURN) {
359 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
360 error = VOP_OLD_CREATE(dvp, ap->a_vpp, &cnp, ap->a_vap);
361 if (error == 0) {
362 cache_setunresolved(nch);
363 cache_setvp(nch, *ap->a_vpp);
364 }
365 } else {
366 if (error == 0) {
367 vput(*ap->a_vpp);
368 *ap->a_vpp = NULL;
369 error = EEXIST;
370 }
371 KKASSERT(*ap->a_vpp == NULL);
372 }
373 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
374 vn_unlock(dvp);
375 vrele(dvp);
376 return (error);
377 }
378
379 /*
380 * vop_compat_nmkdir { struct nchandle *a_nch, XXX STOPGAP FUNCTION
381 * struct vnode *a_dvp,
382 * struct vnode **a_vpp,
383 * struct ucred *a_cred,
384 * struct vattr *a_vap }
385 *
386 * Create a directory as specified by a_vap. Compatibility requires us to
387 * issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKDIR in
388 * order to setup the directory inode's i_offset and i_count (e.g. in UFS).
389 */
390 int
vop_compat_nmkdir(struct vop_nmkdir_args * ap)391 vop_compat_nmkdir(struct vop_nmkdir_args *ap)
392 {
393 struct thread *td = curthread;
394 struct componentname cnp;
395 struct nchandle *nch;
396 struct namecache *ncp;
397 struct vnode *dvp;
398 int error;
399
400 /*
401 * Sanity checks, get a locked directory vnode.
402 */
403 nch = ap->a_nch; /* locked namecache node */
404 ncp = nch->ncp;
405 dvp = ap->a_dvp;
406 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
407 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
408 ncp, ncp->nc_name);
409 return(EAGAIN);
410 }
411
412 /*
413 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
414 * caches all information required to create the entry in the
415 * directory inode. We expect a return code of EJUSTRETURN for
416 * the CREATE case. The cnp must simulated a saved-name situation.
417 */
418 bzero(&cnp, sizeof(cnp));
419 cnp.cn_nameiop = NAMEI_CREATE;
420 cnp.cn_flags = CNP_LOCKPARENT;
421 cnp.cn_nameptr = ncp->nc_name;
422 cnp.cn_namelen = ncp->nc_nlen;
423 cnp.cn_cred = ap->a_cred;
424 cnp.cn_td = td;
425 *ap->a_vpp = NULL;
426
427 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);
428
429 /*
430 * EJUSTRETURN should be returned for this case, which means that
431 * the VFS has setup the directory inode for the create. The dvp we
432 * passed in is expected to remain in a locked state.
433 *
434 * If the VOP_OLD_MKDIR is successful we are responsible for updating
435 * the cache state of the locked ncp that was passed to us.
436 */
437 if (error == EJUSTRETURN) {
438 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
439 error = VOP_OLD_MKDIR(dvp, ap->a_vpp, &cnp, ap->a_vap);
440 if (error == 0) {
441 cache_setunresolved(nch);
442 cache_setvp(nch, *ap->a_vpp);
443 }
444 } else {
445 if (error == 0) {
446 vput(*ap->a_vpp);
447 *ap->a_vpp = NULL;
448 error = EEXIST;
449 }
450 KKASSERT(*ap->a_vpp == NULL);
451 }
452 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
453 vn_unlock(dvp);
454 vrele(dvp);
455 return (error);
456 }
457
458 /*
459 * vop_compat_nmknod { struct nchandle *a_nch, XXX STOPGAP FUNCTION
460 * struct vnode *a_dvp,
461 * struct vnode **a_vpp,
462 * struct ucred *a_cred,
463 * struct vattr *a_vap }
464 *
465 * Create a device or fifo node as specified by a_vap. Compatibility requires
466 * us to issue the appropriate VOP_OLD_LOOKUP before we issue VOP_OLD_MKNOD
467 * in order to setup the directory inode's i_offset and i_count (e.g. in UFS).
468 */
469 int
vop_compat_nmknod(struct vop_nmknod_args * ap)470 vop_compat_nmknod(struct vop_nmknod_args *ap)
471 {
472 struct thread *td = curthread;
473 struct componentname cnp;
474 struct nchandle *nch;
475 struct namecache *ncp;
476 struct vnode *dvp;
477 int error;
478
479 /*
480 * Sanity checks, get a locked directory vnode.
481 */
482 nch = ap->a_nch; /* locked namecache node */
483 ncp = nch->ncp;
484 dvp = ap->a_dvp;
485
486 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
487 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
488 ncp, ncp->nc_name);
489 return(EAGAIN);
490 }
491
492 /*
493 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
494 * caches all information required to create the entry in the
495 * directory inode. We expect a return code of EJUSTRETURN for
496 * the CREATE case. The cnp must simulated a saved-name situation.
497 */
498 bzero(&cnp, sizeof(cnp));
499 cnp.cn_nameiop = NAMEI_CREATE;
500 cnp.cn_flags = CNP_LOCKPARENT;
501 cnp.cn_nameptr = ncp->nc_name;
502 cnp.cn_namelen = ncp->nc_nlen;
503 cnp.cn_cred = ap->a_cred;
504 cnp.cn_td = td;
505 *ap->a_vpp = NULL;
506
507 error = vop_old_lookup(ap->a_head.a_ops, dvp, ap->a_vpp, &cnp);
508
509 /*
510 * EJUSTRETURN should be returned for this case, which means that
511 * the VFS has setup the directory inode for the create. The dvp we
512 * passed in is expected to remain in a locked state.
513 *
514 * If the VOP_OLD_MKNOD is successful we are responsible for updating
515 * the cache state of the locked ncp that was passed to us.
516 */
517 if (error == EJUSTRETURN) {
518 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
519 error = VOP_OLD_MKNOD(dvp, ap->a_vpp, &cnp, ap->a_vap);
520 if (error == 0) {
521 cache_setunresolved(nch);
522 cache_setvp(nch, *ap->a_vpp);
523 }
524 } else {
525 if (error == 0) {
526 vput(*ap->a_vpp);
527 *ap->a_vpp = NULL;
528 error = EEXIST;
529 }
530 KKASSERT(*ap->a_vpp == NULL);
531 }
532 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
533 vn_unlock(dvp);
534 vrele(dvp);
535 return (error);
536 }
537
538 /*
539 * vop_compat_nlink { struct nchandle *a_nch, XXX STOPGAP FUNCTION
540 * struct vnode *a_dvp,
541 * struct vnode *a_vp,
542 * struct ucred *a_cred }
543 *
544 * The passed vp is locked and represents the source. The passed ncp is
545 * locked and represents the target to create.
546 */
547 int
vop_compat_nlink(struct vop_nlink_args * ap)548 vop_compat_nlink(struct vop_nlink_args *ap)
549 {
550 struct thread *td = curthread;
551 struct componentname cnp;
552 struct nchandle *nch;
553 struct namecache *ncp;
554 struct vnode *dvp;
555 struct vnode *tvp;
556 int error;
557
558 /*
559 * Sanity checks, get a locked directory vnode.
560 */
561 nch = ap->a_nch; /* locked namecache node */
562 ncp = nch->ncp;
563 dvp = ap->a_dvp;
564
565 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
566 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
567 ncp, ncp->nc_name);
568 return(EAGAIN);
569 }
570
571 /*
572 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
573 * caches all information required to create the entry in the
574 * directory inode. We expect a return code of EJUSTRETURN for
575 * the CREATE case. The cnp must simulated a saved-name situation.
576 *
577 * It should not be possible for there to be a vnode collision
578 * between the source vp and target (name lookup). However NFS
579 * clients racing each other can cause NFS to alias the same vnode
580 * across several names without the rest of the system knowing it.
581 * Use CNP_NOTVP to avoid a panic in this situation.
582 */
583 bzero(&cnp, sizeof(cnp));
584 cnp.cn_nameiop = NAMEI_CREATE;
585 cnp.cn_flags = CNP_LOCKPARENT | CNP_NOTVP;
586 cnp.cn_nameptr = ncp->nc_name;
587 cnp.cn_namelen = ncp->nc_nlen;
588 cnp.cn_cred = ap->a_cred;
589 cnp.cn_td = td;
590 cnp.cn_notvp = ap->a_vp;
591
592 tvp = NULL;
593 error = vop_old_lookup(ap->a_head.a_ops, dvp, &tvp, &cnp);
594
595 /*
596 * EJUSTRETURN should be returned for this case, which means that
597 * the VFS has setup the directory inode for the create. The dvp we
598 * passed in is expected to remain in a locked state.
599 *
600 * If the VOP_OLD_LINK is successful we are responsible for updating
601 * the cache state of the locked ncp that was passed to us.
602 */
603 if (error == EJUSTRETURN) {
604 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
605 error = VOP_OLD_LINK(dvp, ap->a_vp, &cnp);
606 if (error == 0) {
607 cache_setunresolved(nch);
608 cache_setvp(nch, ap->a_vp);
609 }
610 } else {
611 if (error == 0) {
612 vput(tvp);
613 error = EEXIST;
614 }
615 }
616 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
617 vn_unlock(dvp);
618 vrele(dvp);
619 return (error);
620 }
621
622 int
vop_compat_nsymlink(struct vop_nsymlink_args * ap)623 vop_compat_nsymlink(struct vop_nsymlink_args *ap)
624 {
625 struct thread *td = curthread;
626 struct componentname cnp;
627 struct nchandle *nch;
628 struct namecache *ncp;
629 struct vnode *dvp;
630 struct vnode *vp;
631 int error;
632
633 /*
634 * Sanity checks, get a locked directory vnode.
635 */
636 *ap->a_vpp = NULL;
637 nch = ap->a_nch; /* locked namecache node */
638 ncp = nch->ncp;
639 dvp = ap->a_dvp;
640
641 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
642 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
643 ncp, ncp->nc_name);
644 return(EAGAIN);
645 }
646
647 /*
648 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
649 * caches all information required to create the entry in the
650 * directory inode. We expect a return code of EJUSTRETURN for
651 * the CREATE case. The cnp must simulated a saved-name situation.
652 */
653 bzero(&cnp, sizeof(cnp));
654 cnp.cn_nameiop = NAMEI_CREATE;
655 cnp.cn_flags = CNP_LOCKPARENT;
656 cnp.cn_nameptr = ncp->nc_name;
657 cnp.cn_namelen = ncp->nc_nlen;
658 cnp.cn_cred = ap->a_cred;
659 cnp.cn_td = td;
660
661 vp = NULL;
662 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
663
664 /*
665 * EJUSTRETURN should be returned for this case, which means that
666 * the VFS has setup the directory inode for the create. The dvp we
667 * passed in is expected to remain in a locked state.
668 *
669 * If the VOP_OLD_SYMLINK is successful we are responsible for updating
670 * the cache state of the locked ncp that was passed to us.
671 */
672 if (error == EJUSTRETURN) {
673 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
674 error = VOP_OLD_SYMLINK(dvp, &vp, &cnp, ap->a_vap, ap->a_target);
675 if (error == 0) {
676 cache_setunresolved(nch);
677 cache_setvp(nch, vp);
678 *ap->a_vpp = vp;
679 }
680 } else {
681 if (error == 0) {
682 vput(vp);
683 vp = NULL;
684 error = EEXIST;
685 }
686 KKASSERT(vp == NULL);
687 }
688 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
689 vn_unlock(dvp);
690 vrele(dvp);
691 return (error);
692 }
693
694 /*
695 * vop_compat_nwhiteout { struct nchandle *a_nch, XXX STOPGAP FUNCTION
696 * struct vnode *a_dvp,
697 * struct ucred *a_cred,
698 * int a_flags }
699 *
700 * Issie a whiteout operation (create, lookup, or delete). Compatibility
701 * requires us to issue the appropriate VOP_OLD_LOOKUP before we issue
702 * VOP_OLD_WHITEOUT in order to setup the directory inode's i_offset and i_count
703 * (e.g. in UFS) for the NAMEI_CREATE and NAMEI_DELETE ops. For NAMEI_LOOKUP
704 * no lookup is necessary.
705 */
706 int
vop_compat_nwhiteout(struct vop_nwhiteout_args * ap)707 vop_compat_nwhiteout(struct vop_nwhiteout_args *ap)
708 {
709 struct thread *td = curthread;
710 struct componentname cnp;
711 struct nchandle *nch;
712 struct namecache *ncp;
713 struct vnode *dvp;
714 struct vnode *vp;
715 int error;
716
717 /*
718 * Sanity checks, get a locked directory vnode.
719 */
720 nch = ap->a_nch; /* locked namecache node */
721 ncp = nch->ncp;
722 dvp = ap->a_dvp;
723
724 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
725 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
726 ncp, ncp->nc_name);
727 return(EAGAIN);
728 }
729
730 /*
731 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
732 * caches all information required to create the entry in the
733 * directory inode. We expect a return code of EJUSTRETURN for
734 * the CREATE case. The cnp must simulated a saved-name situation.
735 */
736 bzero(&cnp, sizeof(cnp));
737 cnp.cn_nameiop = ap->a_flags;
738 cnp.cn_flags = CNP_LOCKPARENT;
739 cnp.cn_nameptr = ncp->nc_name;
740 cnp.cn_namelen = ncp->nc_nlen;
741 cnp.cn_cred = ap->a_cred;
742 cnp.cn_td = td;
743
744 vp = NULL;
745
746 /*
747 * EJUSTRETURN should be returned for the CREATE or DELETE cases.
748 * The VFS has setup the directory inode for the create. The dvp we
749 * passed in is expected to remain in a locked state.
750 *
751 * If the VOP_OLD_WHITEOUT is successful we are responsible for updating
752 * the cache state of the locked ncp that was passed to us.
753 */
754 switch(ap->a_flags) {
755 case NAMEI_DELETE:
756 cnp.cn_flags |= CNP_DOWHITEOUT;
757 /* fall through */
758 case NAMEI_CREATE:
759 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
760 if (error == EJUSTRETURN) {
761 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
762 error = VOP_OLD_WHITEOUT(dvp, &cnp, ap->a_flags);
763 if (error == 0)
764 cache_setunresolved(nch);
765 } else {
766 if (error == 0) {
767 vput(vp);
768 vp = NULL;
769 error = EEXIST;
770 }
771 KKASSERT(vp == NULL);
772 }
773 break;
774 case NAMEI_LOOKUP:
775 error = VOP_OLD_WHITEOUT(dvp, NULL, ap->a_flags);
776 break;
777 default:
778 error = EINVAL;
779 break;
780 }
781 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
782 vn_unlock(dvp);
783 vrele(dvp);
784 return (error);
785 }
786
787
788 /*
789 * vop_compat_nremove { struct nchandle *a_nch, XXX STOPGAP FUNCTION
790 * struct vnode *a_dvp,
791 * struct ucred *a_cred }
792 */
793 int
vop_compat_nremove(struct vop_nremove_args * ap)794 vop_compat_nremove(struct vop_nremove_args *ap)
795 {
796 struct thread *td = curthread;
797 struct componentname cnp;
798 struct nchandle *nch;
799 struct namecache *ncp;
800 struct vnode *dvp;
801 struct vnode *vp;
802 int error;
803
804 /*
805 * Sanity checks, get a locked directory vnode.
806 */
807 nch = ap->a_nch; /* locked namecache node */
808 ncp = nch->ncp;
809 dvp = ap->a_dvp;
810
811 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
812 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
813 ncp, ncp->nc_name);
814 return(EAGAIN);
815 }
816
817 /*
818 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
819 * caches all information required to delete the entry in the
820 * directory inode. We expect a return code of 0 for the DELETE
821 * case (meaning that a vp has been found). The cnp must simulated
822 * a saved-name situation.
823 */
824 bzero(&cnp, sizeof(cnp));
825 cnp.cn_nameiop = NAMEI_DELETE;
826 cnp.cn_flags = CNP_LOCKPARENT;
827 cnp.cn_nameptr = ncp->nc_name;
828 cnp.cn_namelen = ncp->nc_nlen;
829 cnp.cn_cred = ap->a_cred;
830 cnp.cn_td = td;
831
832 /*
833 * The vnode must be a directory and must not represent the
834 * current directory.
835 */
836 vp = NULL;
837 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
838 if (error == 0 && vp->v_type == VDIR)
839 error = EPERM;
840 if (error == 0) {
841 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
842 error = VOP_OLD_REMOVE(dvp, vp, &cnp);
843 if (error == 0)
844 cache_unlink(nch);
845 }
846 if (vp) {
847 if (dvp == vp)
848 vrele(vp);
849 else
850 vput(vp);
851 }
852 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
853 vn_unlock(dvp);
854 vrele(dvp);
855 return (error);
856 }
857
858 /*
859 * vop_compat_nrmdir { struct nchandle *a_nch, XXX STOPGAP FUNCTION
860 * struct vnode *dvp,
861 * struct ucred *a_cred }
862 */
863 int
vop_compat_nrmdir(struct vop_nrmdir_args * ap)864 vop_compat_nrmdir(struct vop_nrmdir_args *ap)
865 {
866 struct thread *td = curthread;
867 struct componentname cnp;
868 struct nchandle *nch;
869 struct namecache *ncp;
870 struct vnode *dvp;
871 struct vnode *vp;
872 int error;
873
874 /*
875 * Sanity checks, get a locked directory vnode.
876 */
877 nch = ap->a_nch; /* locked namecache node */
878 ncp = nch->ncp;
879 dvp = ap->a_dvp;
880
881 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
882 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
883 ncp, ncp->nc_name);
884 return(EAGAIN);
885 }
886
887 /*
888 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
889 * caches all information required to delete the entry in the
890 * directory inode. We expect a return code of 0 for the DELETE
891 * case (meaning that a vp has been found). The cnp must simulated
892 * a saved-name situation.
893 */
894 bzero(&cnp, sizeof(cnp));
895 cnp.cn_nameiop = NAMEI_DELETE;
896 cnp.cn_flags = CNP_LOCKPARENT;
897 cnp.cn_nameptr = ncp->nc_name;
898 cnp.cn_namelen = ncp->nc_nlen;
899 cnp.cn_cred = ap->a_cred;
900 cnp.cn_td = td;
901
902 /*
903 * The vnode must be a directory and must not represent the
904 * current directory.
905 */
906 vp = NULL;
907 error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
908 if (error == 0 && vp->v_type != VDIR)
909 error = ENOTDIR;
910 if (error == 0 && vp == dvp)
911 error = EINVAL;
912 if (error == 0 && (vp->v_flag & VROOT))
913 error = EBUSY;
914 if (error == 0) {
915 KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
916 error = VOP_OLD_RMDIR(dvp, vp, &cnp);
917
918 /*
919 * Note that this invalidation will cause any process
920 * currently CD'd into the directory being removed to be
921 * disconnected from the topology and not be able to ".."
922 * back out.
923 */
924 if (error == 0) {
925 cache_inval(nch, CINV_DESTROY);
926 cache_inval_vp(vp, CINV_DESTROY);
927 }
928 }
929 if (vp) {
930 if (dvp == vp)
931 vrele(vp);
932 else
933 vput(vp);
934 }
935 if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
936 vn_unlock(dvp);
937 vrele(dvp);
938 return (error);
939 }
940
941 /*
942 * vop_compat_nrename { struct nchandle *a_fnch, XXX STOPGAP FUNCTION
943 * struct nchandle *a_tnch,
944 * struct ucred *a_cred }
945 *
946 * This is a fairly difficult procedure. The old VOP_OLD_RENAME requires that
947 * the source directory and vnode be unlocked and the target directory and
948 * vnode (if it exists) be locked. All arguments will be vrele'd and
949 * the targets will also be unlocked regardless of the return code.
950 */
951 int
vop_compat_nrename(struct vop_nrename_args * ap)952 vop_compat_nrename(struct vop_nrename_args *ap)
953 {
954 struct thread *td = curthread;
955 struct componentname fcnp;
956 struct componentname tcnp;
957 struct nchandle *fnch;
958 struct nchandle *tnch;
959 struct namecache *fncp;
960 struct namecache *tncp;
961 struct vnode *fdvp, *fvp;
962 struct vnode *tdvp, *tvp;
963 int error;
964
965 /*
966 * Sanity checks, get referenced vnodes representing the source.
967 */
968 fnch = ap->a_fnch; /* locked namecache node */
969 fncp = fnch->ncp;
970 fdvp = ap->a_fdvp;
971
972 /*
973 * Temporarily lock the source directory and lookup in DELETE mode to
974 * check permissions. XXX delete permissions should have been
975 * checked by nlookup(), we need to add NLC_DELETE for delete
976 * checking. It is unclear whether VFS's require the directory setup
977 * info NAMEI_DELETE causes to be stored in the fdvp's inode, but
978 * since it isn't locked and since UFS always does a relookup of
979 * the source, it is believed that the only side effect that matters
980 * is the permissions check.
981 */
982 if ((error = vget(fdvp, LK_EXCLUSIVE)) != 0) {
983 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
984 fncp, fncp->nc_name);
985 return(EAGAIN);
986 }
987
988 bzero(&fcnp, sizeof(fcnp));
989 fcnp.cn_nameiop = NAMEI_DELETE;
990 fcnp.cn_flags = CNP_LOCKPARENT;
991 fcnp.cn_nameptr = fncp->nc_name;
992 fcnp.cn_namelen = fncp->nc_nlen;
993 fcnp.cn_cred = ap->a_cred;
994 fcnp.cn_td = td;
995
996 /*
997 * note: vop_old_lookup (i.e. VOP_OLD_LOOKUP) always returns a locked
998 * fvp.
999 */
1000 fvp = NULL;
1001 error = vop_old_lookup(ap->a_head.a_ops, fdvp, &fvp, &fcnp);
1002 if (error == 0 && (fvp->v_flag & VROOT)) {
1003 vput(fvp); /* as if vop_old_lookup had failed */
1004 error = EBUSY;
1005 }
1006 if ((fcnp.cn_flags & CNP_PDIRUNLOCK) == 0) {
1007 fcnp.cn_flags |= CNP_PDIRUNLOCK;
1008 vn_unlock(fdvp);
1009 }
1010 if (error) {
1011 vrele(fdvp);
1012 return (error);
1013 }
1014 vn_unlock(fvp);
1015
1016 /*
1017 * fdvp and fvp are now referenced and unlocked.
1018 *
1019 * Get a locked directory vnode for the target and lookup the target
1020 * in CREATE mode so it places the required information in the
1021 * directory inode.
1022 */
1023 tnch = ap->a_tnch; /* locked namecache node */
1024 tncp = tnch->ncp;
1025 tdvp = ap->a_tdvp;
1026 if (error) {
1027 vrele(fdvp);
1028 vrele(fvp);
1029 return (error);
1030 }
1031 if ((error = vget(tdvp, LK_EXCLUSIVE)) != 0) {
1032 kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
1033 tncp, tncp->nc_name);
1034 vrele(fdvp);
1035 vrele(fvp);
1036 return(EAGAIN);
1037 }
1038
1039 /*
1040 * Setup the cnp for a traditional vop_old_lookup() call. The lookup
1041 * caches all information required to create the entry in the
1042 * target directory inode.
1043 */
1044 bzero(&tcnp, sizeof(tcnp));
1045 tcnp.cn_nameiop = NAMEI_RENAME;
1046 tcnp.cn_flags = CNP_LOCKPARENT;
1047 tcnp.cn_nameptr = tncp->nc_name;
1048 tcnp.cn_namelen = tncp->nc_nlen;
1049 tcnp.cn_cred = ap->a_cred;
1050 tcnp.cn_td = td;
1051
1052 tvp = NULL;
1053 error = vop_old_lookup(ap->a_head.a_ops, tdvp, &tvp, &tcnp);
1054
1055 if (error == EJUSTRETURN) {
1056 /*
1057 * Target does not exist. tvp should be NULL.
1058 */
1059 KKASSERT(tvp == NULL);
1060 KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0);
1061 error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp);
1062 if (error == 0)
1063 cache_rename(fnch, tnch);
1064 } else if (error == 0) {
1065 /*
1066 * Target exists. VOP_OLD_RENAME should correctly delete the
1067 * target.
1068 */
1069 KKASSERT((tcnp.cn_flags & CNP_PDIRUNLOCK) == 0);
1070 error = VOP_OLD_RENAME(fdvp, fvp, &fcnp, tdvp, tvp, &tcnp);
1071 if (error == 0)
1072 cache_rename(fnch, tnch);
1073 } else {
1074 vrele(fdvp);
1075 vrele(fvp);
1076 if (tcnp.cn_flags & CNP_PDIRUNLOCK)
1077 vrele(tdvp);
1078 else
1079 vput(tdvp);
1080 }
1081 return (error);
1082 }
1083
1084 static int
vop_nolookup(struct vop_old_lookup_args * ap)1085 vop_nolookup(struct vop_old_lookup_args *ap)
1086 {
1087
1088 *ap->a_vpp = NULL;
1089 return (ENOTDIR);
1090 }
1091
1092 /*
1093 * vop_nostrategy:
1094 *
1095 * Strategy routine for VFS devices that have none.
1096 *
1097 * B_ERROR and B_INVAL must be cleared prior to calling any strategy
1098 * routine. Typically this is done for a BUF_CMD_READ strategy call.
1099 * Typically B_INVAL is assumed to already be clear prior to a write
1100 * and should not be cleared manually unless you just made the buffer
1101 * invalid. B_ERROR should be cleared either way.
1102 */
1103
1104 static int
vop_nostrategy(struct vop_strategy_args * ap)1105 vop_nostrategy (struct vop_strategy_args *ap)
1106 {
1107 kprintf("No strategy for buffer at %p\n", ap->a_bio->bio_buf);
1108 vprint("", ap->a_vp);
1109 ap->a_bio->bio_buf->b_flags |= B_ERROR;
1110 ap->a_bio->bio_buf->b_error = EOPNOTSUPP;
1111 biodone(ap->a_bio);
1112 return (EOPNOTSUPP);
1113 }
1114
1115 int
vop_stdpathconf(struct vop_pathconf_args * ap)1116 vop_stdpathconf(struct vop_pathconf_args *ap)
1117 {
1118 int error = 0;
1119
1120 switch (ap->a_name) {
1121 case _PC_CHOWN_RESTRICTED:
1122 *ap->a_retval = _POSIX_CHOWN_RESTRICTED;
1123 break;
1124 case _PC_LINK_MAX:
1125 *ap->a_retval = LINK_MAX;
1126 break;
1127 case _PC_MAX_CANON:
1128 *ap->a_retval = MAX_CANON;
1129 break;
1130 case _PC_MAX_INPUT:
1131 *ap->a_retval = MAX_INPUT;
1132 break;
1133 case _PC_NAME_MAX:
1134 *ap->a_retval = NAME_MAX;
1135 break;
1136 case _PC_NO_TRUNC:
1137 *ap->a_retval = _POSIX_NO_TRUNC;
1138 break;
1139 case _PC_PATH_MAX:
1140 *ap->a_retval = PATH_MAX;
1141 break;
1142 case _PC_PIPE_BUF:
1143 *ap->a_retval = PIPE_BUF;
1144 break;
1145 case _PC_VDISABLE:
1146 *ap->a_retval = _POSIX_VDISABLE;
1147 break;
1148 default:
1149 error = EINVAL;
1150 break;
1151 }
1152 return (error);
1153 }
1154
1155 /*
1156 * Standard open.
1157 *
1158 * (struct vnode *a_vp, int a_mode, struct ucred *a_ucred, struct file *a_fp)
1159 *
1160 * a_mode: note, 'F' modes, e.g. FREAD, FWRITE
1161 */
1162 int
vop_stdopen(struct vop_open_args * ap)1163 vop_stdopen(struct vop_open_args *ap)
1164 {
1165 struct vnode *vp = ap->a_vp;
1166 struct file *fp;
1167
1168 if (ap->a_fpp) {
1169 fp = *ap->a_fpp;
1170
1171 switch(vp->v_type) {
1172 case VFIFO:
1173 fp->f_type = DTYPE_FIFO;
1174 break;
1175 default:
1176 fp->f_type = DTYPE_VNODE;
1177 break;
1178 }
1179 /* retain flags not to be copied */
1180 fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_mode & FMASK);
1181 fp->f_ops = &vnode_fileops;
1182 fp->f_data = vp;
1183 vref(vp);
1184 }
1185 if (ap->a_mode & FWRITE)
1186 atomic_add_int(&vp->v_writecount, 1);
1187 KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX);
1188 atomic_add_int(&vp->v_opencount, 1);
1189 return (0);
1190 }
1191
1192 /*
1193 * Standard close.
1194 *
1195 * (struct vnode *a_vp, int a_fflag)
1196 *
1197 * a_fflag: note, 'F' modes, e.g. FREAD, FWRITE. same as a_mode in stdopen?
1198 *
1199 * v_lastwrite_ts is used to record the timestamp that should be used to
1200 * set the file mtime for any asynchronously flushed pages modified via
1201 * mmap(), which can occur after the last close().
1202 */
1203 int
vop_stdclose(struct vop_close_args * ap)1204 vop_stdclose(struct vop_close_args *ap)
1205 {
1206 struct vnode *vp = ap->a_vp;
1207
1208 KASSERT(vp->v_opencount > 0,
1209 ("VOP_STDCLOSE: BAD OPENCOUNT %p %d type=%d ops=%p flgs=%08x",
1210 vp, vp->v_opencount, vp->v_type, *vp->v_ops, vp->v_flag));
1211 if (ap->a_fflag & FWRITE) {
1212 KASSERT(vp->v_writecount > 0,
1213 ("VOP_STDCLOSE: BAD WRITECOUNT %p %d",
1214 vp, vp->v_writecount));
1215 atomic_add_int(&vp->v_writecount, -1);
1216 }
1217 atomic_add_int(&vp->v_opencount, -1);
1218 return (0);
1219 }
1220
1221 /*
1222 * Standard getattr_lite
1223 *
1224 * Just calls getattr
1225 */
1226 int
vop_stdgetattr_lite(struct vop_getattr_lite_args * ap)1227 vop_stdgetattr_lite(struct vop_getattr_lite_args *ap)
1228 {
1229 struct vattr va;
1230 struct vattr_lite *lvap;
1231 int error;
1232
1233 error = VOP_GETATTR(ap->a_vp, &va);
1234 if (__predict_true(error == 0)) {
1235 lvap = ap->a_lvap;
1236 lvap->va_type = va.va_type;
1237 lvap->va_nlink = va.va_nlink;
1238 lvap->va_mode = va.va_mode;
1239 lvap->va_uid = va.va_uid;
1240 lvap->va_gid = va.va_gid;
1241 lvap->va_size = va.va_size;
1242 lvap->va_flags = va.va_flags;
1243 }
1244 return error;
1245 }
1246
1247 /*
1248 * Implement standard getpages and putpages. All filesystems must use
1249 * the buffer cache to back regular files.
1250 */
1251 int
vop_stdgetpages(struct vop_getpages_args * ap)1252 vop_stdgetpages(struct vop_getpages_args *ap)
1253 {
1254 struct mount *mp;
1255 int error;
1256
1257 if ((mp = ap->a_vp->v_mount) != NULL) {
1258 error = vnode_pager_generic_getpages(
1259 ap->a_vp, ap->a_m, ap->a_count,
1260 ap->a_reqpage, ap->a_seqaccess);
1261 } else {
1262 error = VM_PAGER_BAD;
1263 }
1264 return (error);
1265 }
1266
1267 int
vop_stdputpages(struct vop_putpages_args * ap)1268 vop_stdputpages(struct vop_putpages_args *ap)
1269 {
1270 struct mount *mp;
1271 int error;
1272
1273 if ((mp = ap->a_vp->v_mount) != NULL) {
1274 error = vnode_pager_generic_putpages(
1275 ap->a_vp, ap->a_m, ap->a_count,
1276 ap->a_flags, ap->a_rtvals);
1277 } else {
1278 error = VM_PAGER_BAD;
1279 }
1280 return (error);
1281 }
1282
/*
 * Read stub for vnodes that do not support reading; always EINVAL.
 */
int
vop_stdnoread(struct vop_read_args *ap)
{
	return (EINVAL);
}
1288
/*
 * Write stub for vnodes that do not support writing; always EINVAL.
 */
int
vop_stdnowrite(struct vop_write_args *ap)
{
	return (EINVAL);
}
1294
1295 /*
1296 * vfs default ops
1297 * used to fill the vfs fucntion table to get reasonable default return values.
1298 */
1299 int
vop_stdmountctl(struct vop_mountctl_args * ap)1300 vop_stdmountctl(struct vop_mountctl_args *ap)
1301 {
1302
1303 struct mount *mp;
1304 int error = 0;
1305
1306 mp = ap->a_head.a_ops->head.vv_mount;
1307
1308 switch(ap->a_op) {
1309 case MOUNTCTL_MOUNTFLAGS:
1310 /*
1311 * Get a string buffer with all the mount flags
1312 * names comman separated.
1313 * mount(2) will use this information.
1314 */
1315 *ap->a_res = vfs_flagstostr(mp->mnt_flag & MNT_VISFLAGMASK, NULL,
1316 ap->a_buf, ap->a_buflen, &error);
1317 break;
1318 case MOUNTCTL_INSTALL_VFS_JOURNAL:
1319 case MOUNTCTL_RESTART_VFS_JOURNAL:
1320 case MOUNTCTL_REMOVE_VFS_JOURNAL:
1321 case MOUNTCTL_RESYNC_VFS_JOURNAL:
1322 case MOUNTCTL_STATUS_VFS_JOURNAL:
1323 error = journal_mountctl(ap);
1324 break;
1325 default:
1326 error = EOPNOTSUPP;
1327 break;
1328 }
1329 return (error);
1330 }
1331
1332 int
vop_stdallocate(struct vop_allocate_args * ap)1333 vop_stdallocate(struct vop_allocate_args *ap)
1334 {
1335 struct thread *td;
1336 struct vnode *vp;
1337 struct vattr vattr, *vap;
1338 struct uio auio;
1339 struct iovec aiov;
1340 uint8_t *buf;
1341 off_t offset, len, fsize;
1342 size_t iosize;
1343 int error;
1344
1345 bzero(&auio, sizeof(auio));
1346
1347 td = curthread;
1348 vap = &vattr;
1349 buf = NULL;
1350
1351 vp = ap->a_vp;
1352 offset = ap->a_offset;
1353 len = ap->a_len;
1354
1355 error = VOP_GETATTR(vp, vap);
1356 if (error != 0)
1357 goto out;
1358 fsize = vap->va_size;
1359 iosize = vap->va_blocksize;
1360 if (iosize == 0)
1361 iosize = BLKDEV_IOSIZE;
1362 if (iosize > vmaxiosize(vp))
1363 iosize = vmaxiosize(vp);
1364 buf = kmalloc(iosize, M_TEMP, M_WAITOK);
1365
1366 if (offset + len > vap->va_size) {
1367 /*
1368 * Test offset + len against the filesystem's maxfilesize.
1369 */
1370 VATTR_NULL(&vattr);
1371 vap->va_size = offset + len;
1372 error = VOP_SETATTR(vp, vap, td->td_ucred);
1373 if (error != 0)
1374 goto out;
1375 VATTR_NULL(&vattr);
1376 vap->va_size = fsize;
1377 error = VOP_SETATTR(vp, vap, td->td_ucred);
1378 if (error != 0)
1379 goto out;
1380 }
1381
1382 for (;;) {
1383 /*
1384 * Read and write back anything below the nominal file
1385 * size. There's currently no way outside the filesystem
1386 * to know whether this area is sparse or not.
1387 */
1388 off_t cur = iosize;
1389 if ((offset % iosize) != 0)
1390 cur -= (offset % iosize);
1391 if (cur > len)
1392 cur = len;
1393 if (offset < fsize) {
1394 aiov.iov_base = buf;
1395 aiov.iov_len = cur;
1396 auio.uio_iov = &aiov;
1397 auio.uio_iovcnt = 1;
1398 auio.uio_offset = offset;
1399 auio.uio_resid = cur;
1400 auio.uio_segflg = UIO_SYSSPACE;
1401 auio.uio_rw = UIO_READ;
1402 auio.uio_td = td;
1403 error = VOP_READ(vp, &auio, 0, td->td_ucred);
1404 if (error != 0)
1405 break;
1406 if (auio.uio_resid > 0) {
1407 bzero(buf + cur - auio.uio_resid,
1408 auio.uio_resid);
1409 }
1410 } else {
1411 bzero(buf, cur);
1412 }
1413
1414 aiov.iov_base = buf;
1415 aiov.iov_len = cur;
1416 auio.uio_iov = &aiov;
1417 auio.uio_iovcnt = 1;
1418 auio.uio_offset = offset;
1419 auio.uio_resid = cur;
1420 auio.uio_segflg = UIO_SYSSPACE;
1421 auio.uio_rw = UIO_WRITE;
1422 auio.uio_td = td;
1423
1424 error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
1425 if (error != 0)
1426 break;
1427
1428 len -= cur;
1429 offset += cur;
1430 if (len == 0)
1431 break;
1432 /*
1433 if (should_yield())
1434 break;
1435 */
1436 }
1437 out:
1438 ap->a_offset = offset;
1439 ap->a_len = len;
1440 kfree(buf, M_TEMP);
1441
1442 return (error);
1443 }
1444
/*
 * Standard fdatasync: forwards directly to VOP_FSYNC_FP() with the same
 * waitfor/flags/fp arguments (no data-only optimization by default).
 */
int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{
	return (VOP_FSYNC_FP(ap->a_vp, ap->a_waitfor, ap->a_flags, ap->a_fp));
}
1450
/*
 * Default root vnode lookup: not supported.
 */
int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
1456
/*
 * Default statfs: not supported.
 */
int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	return (EOPNOTSUPP);
}
1462
1463 /*
1464 * If the VFS does not implement statvfs, then call statfs and convert
1465 * the values. This code was taken from libc's __cvtstatvfs() function,
1466 * contributed by Joerg Sonnenberger.
1467 */
1468 int
vfs_stdstatvfs(struct mount * mp,struct statvfs * sbp,struct ucred * cred)1469 vfs_stdstatvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
1470 {
1471 struct statfs *in;
1472 int error;
1473
1474 in = &mp->mnt_stat;
1475 error = VFS_STATFS(mp, in, cred);
1476 if (error == 0) {
1477 bzero(sbp, sizeof(*sbp));
1478
1479 sbp->f_bsize = in->f_bsize;
1480 sbp->f_frsize = in->f_bsize;
1481 sbp->f_blocks = in->f_blocks;
1482 sbp->f_bfree = in->f_bfree;
1483 sbp->f_bavail = in->f_bavail;
1484 sbp->f_files = in->f_files;
1485 sbp->f_ffree = in->f_ffree;
1486
1487 /*
1488 * XXX
1489 * This field counts the number of available inodes to non-root
1490 * users, but this information is not available via statfs.
1491 * Just ignore this issue by returning the total number
1492 * instead.
1493 */
1494 sbp->f_favail = in->f_ffree;
1495
1496 /*
1497 * XXX
1498 * This field has a different meaning for statfs and statvfs.
1499 * For the former it is the cookie exported for NFS and not
1500 * intended for normal userland use.
1501 */
1502 sbp->f_fsid = 0;
1503
1504 sbp->f_flag = 0;
1505 if (in->f_flags & MNT_RDONLY)
1506 sbp->f_flag |= ST_RDONLY;
1507 if (in->f_flags & MNT_NOSUID)
1508 sbp->f_flag |= ST_NOSUID;
1509 sbp->f_namemax = 0;
1510 sbp->f_owner = in->f_owner;
1511 /*
1512 * XXX
1513 * statfs contains the type as string, statvfs expects it as
1514 * enumeration.
1515 */
1516 sbp->f_type = 0;
1517
1518 sbp->f_syncreads = in->f_syncreads;
1519 sbp->f_syncwrites = in->f_syncwrites;
1520 sbp->f_asyncreads = in->f_asyncreads;
1521 sbp->f_asyncwrites = in->f_asyncwrites;
1522 }
1523 return (error);
1524 }
1525
/*
 * Default vnode-to-filehandle conversion: not supported.
 */
int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{
	return (EOPNOTSUPP);
}
1531
/*
 * Default mount start hook: nothing to do, succeed.
 */
int
vfs_stdstart(struct mount *mp, int flags)
{
	return (0);
}
1537
1538 int
vfs_stdquotactl(struct mount * mp,int cmds,uid_t uid,caddr_t arg,struct ucred * cred)1539 vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid,
1540 caddr_t arg, struct ucred *cred)
1541 {
1542 return (EOPNOTSUPP);
1543 }
1544
/*
 * Default sync: nothing dirty to flush, report success.
 */
int
vfs_stdsync(struct mount *mp, int waitfor)
{
	return (0);
}
1550
/*
 * Sync stub that explicitly reports sync as unsupported.
 */
int
vfs_stdnosync(struct mount *mp, int waitfor)
{
	return (EOPNOTSUPP);
}
1556
/*
 * Default inode-number-to-vnode lookup: not supported.
 */
int
vfs_stdvget(struct mount *mp, struct vnode *dvp, ino_t ino, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
1562
/*
 * Default filehandle-to-vnode conversion: not supported.
 */
int
vfs_stdfhtovp(struct mount *mp, struct vnode *rootvp,
	      struct fid *fhp, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}
1569
/*
 * Default NFS export check: not supported.
 */
int
vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
		struct ucred **credanonp)
{
	return (EOPNOTSUPP);
}
1576
/*
 * Default VFS module initialization: nothing to do, succeed.
 */
int
vfs_stdinit(struct vfsconf *vfsp)
{
	return (0);
}
1582
/*
 * Default VFS module teardown: nothing to do, succeed.
 */
int
vfs_stduninit(struct vfsconf *vfsp)
{
	return (0);
}
1588
/*
 * Default extended-attribute control: not supported.
 */
int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *vp,
		  int attrnamespace, const char *attrname,
		  struct ucred *cred)
{
	return (EOPNOTSUPP);
}
1596
1597 #define ACCOUNTING_NB_FSTYPES 7
1598
1599 static const char *accounting_fstypes[ACCOUNTING_NB_FSTYPES] = {
1600 "ext2fs", "hammer", "mfs", "ntfs", "null", "tmpfs", "ufs" };
1601
1602 int
vfs_stdac_init(struct mount * mp)1603 vfs_stdac_init(struct mount *mp)
1604 {
1605 const char* fs_type;
1606 int i, fstype_ok = 0;
1607
1608 /* is mounted fs type one we want to do some accounting for ? */
1609 for (i=0; i<ACCOUNTING_NB_FSTYPES; i++) {
1610 fs_type = accounting_fstypes[i];
1611 if (strncmp(mp->mnt_stat.f_fstypename, fs_type,
1612 sizeof(mp->mnt_stat)) == 0) {
1613 fstype_ok = 1;
1614 break;
1615 }
1616 }
1617 if (fstype_ok == 0)
1618 return (0);
1619
1620 vq_init(mp);
1621 return (0);
1622 }
1623
/*
 * Tear down per-mount accounting state (vq_done); counterpart of
 * vfs_stdac_init().  Note: called unconditionally, even for mounts
 * vfs_stdac_init() skipped -- presumably vq_done() copes; verify.
 */
void
vfs_stdac_done(struct mount *mp)
{
	vq_done(mp);
}
1629
/*
 * Default namecache-generation "set" hook: deliberately a no-op for
 * filesystems that keep no generation state.
 */
void
vfs_stdncpgen_set(struct mount *mp, struct namecache *ncp)
{
}
1634
/*
 * Default namecache-generation test hook: never reports a stale entry.
 */
int
vfs_stdncpgen_test(struct mount *mp, struct namecache *ncp)
{
	return (0);
}
1640
1641 int
vfs_stdmodifying(struct mount * mp)1642 vfs_stdmodifying(struct mount *mp)
1643 {
1644 if (mp->mnt_flag & MNT_RDONLY)
1645 return EROFS;
1646 return 0;
1647 }
1648 /* end of vfs default ops */
1649