/*	$OpenBSD: vfs_vnops.c,v 1.125 2025/01/06 08:57:23 mpi Exp $	*/
/*	$NetBSD: vfs_vnops.c,v 1.20 1996/02/04 02:18:41 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.5 (Berkeley) 12/8/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/specdev.h>
#include <sys/unistd.h>

int vn_read(struct file *, struct uio *, int);
int vn_write(struct file *, struct uio *, int);
int vn_kqfilter(struct file *, struct knote *);
int vn_closefile(struct file *, struct proc *);
int vn_seek(struct file *, off_t *, int, struct proc *);

const struct fileops vnops = {
	.fo_read	= vn_read,
	.fo_write	= vn_write,
	.fo_ioctl	= vn_ioctl,
	.fo_kqfilter	= vn_kqfilter,
	.fo_stat	= vn_statfile,
	.fo_close	= vn_closefile,
	.fo_seek	= vn_seek,
};
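
/*
 * Example (an illustrative sketch, not code from a specific caller):
 * these functions are reached through the fileops table rather than
 * called directly, so a read on a vnode-backed file descriptor is
 * dispatched roughly like this, assuming `fp' and `auio' were already
 * set up by the caller:
 *
 *	error = (*fp->f_ops->fo_read)(fp, &auio, 0);
 *
 * Passing FO_POSITION instead of 0 requests positional (pread-style)
 * I/O, which leaves fp->f_offset untouched, as vn_read() below shows.
 */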

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	struct cloneinfo *cip;
	int error;

	/*
	 * The only valid flags to pass in here from NDINIT are
	 * KERNELPATH or BYPASSUNVEIL. This function will override the
	 * nameiop based on the fmode and cmode flags, so validate that
	 * our caller has not set other flags or operations in the nameidata
	 * structure.
	 */
	KASSERT((ndp->ni_cnd.cn_flags & ~(KERNELPATH|BYPASSUNVEIL)) == 0);
	KASSERT(ndp->ni_cnd.cn_nameiop == 0);

	if ((fmode & (FREAD|FWRITE)) == 0)
		return (EINVAL);
	if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
		return (EINVAL);
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);

		if (ndp->ni_vp == NULL) {
			vattr_null(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				va.va_vaflags |= VA_EXCLUSIVE;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			vput(ndp->ni_dvp);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags |=
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type == VLNK) {
		error = ELOOP;
		goto bad;
	}
	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & FWRITE) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
		vattr_null(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;

	if (vp->v_flag & VCLONED) {
		cip = (struct cloneinfo *)vp->v_data;

		vp->v_flag &= ~VCLONED;

		ndp->ni_vp = cip->ci_vp;	/* return cloned vnode */
		vp->v_data = cip->ci_data;	/* restore v_data */
		VOP_UNLOCK(vp);			/* keep a reference */
		vp = ndp->ni_vp;		/* for the increment below */

		free(cip, M_TEMP, sizeof(*cip));
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
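
/*
 * Usage sketch for vn_open() (illustrative; the path and flags are
 * made up): the caller prepares the nameidata with NDINIT() and a
 * zero nameiop, as the KASSERTs above require.  On success ni_vp
 * holds the opened vnode, locked; it must be unlocked before a later
 * vn_close(), which takes the lock itself and consumes the reference.
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, 0, 0, UIO_SYSSPACE, "/some/path", p);
 *	if ((error = vn_open(&nd, FREAD, 0)) != 0)
 *		return (error);
 *	VOP_UNLOCK(nd.ni_vp);
 *	...
 *	error = vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
 */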

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket or a block or character
	 * device resident on the file system.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		case VNON:
		case VCHR:
		case VSOCK:
		case VFIFO:
		case VBAD:
		case VBLK:
			break;
		}
	}
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if ((vp->v_flag & VTEXT) && !uvm_vnp_uncache(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Check whether a write operation would exceed the file size rlimit
 * for the process, if one should be applied for this operation.
 * If a partial write should take place, the uio is adjusted and the
 * amount by which the request would have exceeded the limit is returned
 * via the 'overrun' argument.
 */
int
vn_fsizechk(struct vnode *vp, struct uio *uio, int ioflag, ssize_t *overrun)
{
	struct proc *p = uio->uio_procp;

	*overrun = 0;
	if (vp->v_type == VREG && p != NULL && !(ioflag & IO_NOLIMIT)) {
		rlim_t limit = lim_cur_proc(p, RLIMIT_FSIZE);

		/* if already at or over the limit, send the signal and fail */
		if (uio->uio_offset >= limit) {
			psignal(p, SIGXFSZ);
			return (EFBIG);
		}

		/* otherwise, clamp the write to stay under the limit */
		if (uio->uio_resid > limit - uio->uio_offset) {
			*overrun = uio->uio_resid - (limit - uio->uio_offset);
			uio->uio_resid = limit - uio->uio_offset;
		}
	}

	return (0);
}
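
/*
 * Sketch of the intended caller pattern (an assumption drawn from the
 * interface above, not a copy of any particular file system): check
 * the limit before issuing the write, then restore the clamped
 * residual so the caller observes a partial write instead of silently
 * losing the overrun.
 *
 *	ssize_t overrun;
 *
 *	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)) != 0)
 *		return (error);
 *	... perform the write with the adjusted uio ...
 *	uio->uio_resid += overrun;	(undo the clamp afterwards)
 */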

/*
 * Mark a vnode as being the text image of a running process.
 */
void
vn_marktext(struct vnode *vp)
{
	vp->v_flag |= VTEXT;
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, p);
	vput(vp);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *cred, size_t *aresid,
    struct proc *p)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);

	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	return (error);
}
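
/*
 * Illustrative vn_rdwr() call (a sketch; the buffer, length and offset
 * are made up): read the first 512 bytes of `vp' into a kernel buffer.
 * Because `aresid' is NULL, a short transfer is converted to EIO as
 * shown above.
 *
 *	char buf[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, p->p_ucred, NULL, p);
 */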

/*
 * File table vnode read routine.
 */
int
vn_read(struct file *fp, struct uio *uio, int fflags)
{
	struct vnode *vp = fp->f_data;
	struct ucred *cred = fp->f_cred;
	size_t count = uio->uio_resid;
	off_t offset;
	int error;

	KERNEL_LOCK();

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if ((fflags & FO_POSITION) == 0)
		offset = uio->uio_offset = fp->f_offset;
	else
		offset = uio->uio_offset;

	/* no wrap around of offsets except on character devices */
	if (vp->v_type != VCHR && count > LLONG_MAX - offset) {
		error = EINVAL;
		goto done;
	}

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto done;
	}

	error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0,
	    cred);
	if ((fflags & FO_POSITION) == 0) {
		mtx_enter(&fp->f_mtx);
		fp->f_offset += count - uio->uio_resid;
		mtx_leave(&fp->f_mtx);
	}
done:
	VOP_UNLOCK(vp);
	KERNEL_UNLOCK();
	return (error);
}

/*
 * File table vnode write routine.
 */
int
vn_write(struct file *fp, struct uio *uio, int fflags)
{
	struct vnode *vp = fp->f_data;
	struct ucred *cred = fp->f_cred;
	int error, ioflag = IO_UNIT;
	size_t count;

	KERNEL_LOCK();

	/* note: pwrite/pwritev are unaffected by O_APPEND */
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND) &&
	    (fflags & FO_POSITION) == 0)
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & FFSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((fflags & FO_POSITION) == 0)
		uio->uio_offset = fp->f_offset;
	count = uio->uio_resid;
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((fflags & FO_POSITION) == 0) {
		mtx_enter(&fp->f_mtx);
		if (ioflag & IO_APPEND)
			fp->f_offset = uio->uio_offset;
		else
			fp->f_offset += count - uio->uio_resid;
		mtx_leave(&fp->f_mtx);
	}
	VOP_UNLOCK(vp);

	KERNEL_UNLOCK();
	return (error);
}

/*
 * File table wrapper for vn_stat
 */
int
vn_statfile(struct file *fp, struct stat *sb, struct proc *p)
{
	struct vnode *vp = fp->f_data;
	int error;

	KERNEL_LOCK();
	error = vn_stat(vp, sb, p);
	KERNEL_UNLOCK();

	return (error);
}

/*
 * vnode stat routine.
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct proc *p)
{
	struct vattr va;
	int error;
	mode_t mode;

	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
	if (error)
		return (error);
	/*
	 * Copy from vattr table
	 */
	memset(sb, 0, sizeof(*sb));
	sb->st_dev = va.va_fsid;
	sb->st_ino = va.va_fileid;
	mode = va.va_mode;
	switch (vp->v_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = va.va_nlink;
	sb->st_uid = va.va_uid;
	sb->st_gid = va.va_gid;
	sb->st_rdev = va.va_rdev;
	sb->st_size = va.va_size;
	sb->st_atim.tv_sec = va.va_atime.tv_sec;
	sb->st_atim.tv_nsec = va.va_atime.tv_nsec;
	sb->st_mtim.tv_sec = va.va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = va.va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = va.va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = va.va_ctime.tv_nsec;
	sb->st_blksize = va.va_blocksize;
	sb->st_flags = va.va_flags;
	sb->st_gen = va.va_gen;
	sb->st_blocks = va.va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct vnode *vp = fp->f_data;
	struct vattr vattr;
	int error = ENOTTY;

	KERNEL_LOCK();
	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				break;
			*(int *)data = vattr.va_size - foffset(fp);

		} else if (com == FIOASYNC)	/* XXX */
			error = 0;		/* XXX */
		break;

	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			struct session *s = p->p_p->ps_session;
			struct vnode *ovp = s->s_ttyvp;
			s->s_ttyvp = vp;
			vref(vp);
			if (ovp)
				vrele(ovp);
		}
		break;

	default:
		break;
	}
	KERNEL_UNLOCK();

	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error, xlocked, do_wakeup;

	do {
		mtx_enter(&vnode_mtx);
		if (vp->v_lflag & VXLOCK) {
			vp->v_lflag |= VXWANT;
			msleep_nsec(vp, &vnode_mtx, PINOD, "vn_lock", INFSLP);
			mtx_leave(&vnode_mtx);
			error = ENOENT;
		} else {
			vp->v_lockcount++;
			mtx_leave(&vnode_mtx);

			error = VOP_LOCK(vp, flags);

			mtx_enter(&vnode_mtx);
			vp->v_lockcount--;
			do_wakeup = (vp->v_lockcount == 0);
			xlocked = vp->v_lflag & VXLOCK;
			mtx_leave(&vnode_mtx);

			if (error == 0) {
				if (!xlocked)
					return (0);

				/*
				 * The vnode was exclusively locked while
				 * acquiring the requested lock. Release it and
				 * try again.
				 */
				error = ENOENT;
				VOP_UNLOCK(vp);
				if (do_wakeup)
					wakeup_one(&vp->v_lockcount);
			}
		}
	} while (flags & LK_RETRY);
	return (error);
}
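
/*
 * The two caller idioms this implies (illustrative only): with
 * LK_RETRY the loop above only returns once a lock was obtained
 * (possibly on a reclaimed vnode), so the return value is
 * conventionally ignored; without LK_RETRY the caller must handle
 * ENOENT, meaning the vnode was reclaimed while we slept and must not
 * be used further.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
 *		... vnode is gone; bail out without touching it ...
 *	}
 */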

/*
 * File table vnode close routine.
 */
int
vn_closefile(struct file *fp, struct proc *p)
{
	struct vnode *vp = fp->f_data;
	struct flock lf;
	int error;

	KERNEL_LOCK();
	if ((fp->f_iflags & FIF_HASLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}
	error = vn_close(vp, fp->f_flag, fp->f_cred, p);
	KERNEL_UNLOCK();
	return (error);
}

int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	KERNEL_LOCK();
	error = VOP_KQFILTER(fp->f_data, fp->f_flag, kn);
	KERNEL_UNLOCK();
	return (error);
}

int
vn_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct ucred *cred = p->p_ucred;
	struct vnode *vp = fp->f_data;
	struct vattr vattr;
	off_t newoff;
	int error = 0;
	int special;

	if (vp->v_type == VFIFO)
		return (ESPIPE);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (vp->v_type == VCHR)
		special = 1;
	else
		special = 0;

	switch (whence) {
	case SEEK_CUR:
		newoff = fp->f_offset + *offset;
		break;
	case SEEK_END:
		KERNEL_LOCK();
		error = VOP_GETATTR(vp, &vattr, cred, p);
		KERNEL_UNLOCK();
		if (error)
			goto out;
		newoff = *offset + (off_t)vattr.va_size;
		break;
	case SEEK_SET:
		newoff = *offset;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	if (!special && newoff < 0) {
		error = EINVAL;
		goto out;
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;

out:
	VOP_UNLOCK(vp);
	return (error);
}

/*
 * Common code for vnode access operations.
 */

/* Check if a directory can be found inside another in the hierarchy */
int
vn_isunder(struct vnode *lvp, struct vnode *rvp, struct proc *p)
{
	int error;

	error = vfs_getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);

	if (!error)
		return (1);

	return (0);
}
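
/*
 * Illustrative check (a sketch; `dvp' is a hypothetical directory
 * vnode): a non-zero return means `lvp' lies somewhere beneath `rvp'
 * in the hierarchy, e.g. to confirm a directory is still inside the
 * process's root directory.
 *
 *	if (vn_isunder(dvp, p->p_fd->fd_rdir, p))
 *		... dvp is below the current root ...
 */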