xref: /dragonfly/sys/kern/vfs_syscalls.c (revision 9bb2a92d)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
39  * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40  * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.29 2004/03/01 06:33:17 dillon Exp $
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/buf.h>
46 #include <sys/sysent.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
49 #include <sys/sysproto.h>
50 #include <sys/filedesc.h>
51 #include <sys/kernel.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/linker.h>
55 #include <sys/stat.h>
56 #include <sys/unistd.h>
57 #include <sys/vnode.h>
58 #include <sys/proc.h>
59 #include <sys/namei.h>
60 #include <sys/dirent.h>
61 #include <sys/extattr.h>
62 #include <sys/kern_syscall.h>
63 
64 #include <machine/limits.h>
65 #include <vfs/union/union.h>
66 #include <sys/sysctl.h>
67 #include <vm/vm.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_zone.h>
70 #include <vm/vm_page.h>
71 
72 #include <sys/file2.h>
73 
74 static int change_dir (struct nameidata *ndp, struct thread *td);
75 static void checkdirs (struct vnode *olddp);
76 static int chroot_refuse_vdir_fds (struct filedesc *fdp);
77 static int getutimes (const struct timeval *, struct timespec *);
78 static int setfown (struct vnode *, uid_t, gid_t);
79 static int setfmode (struct vnode *, int);
80 static int setfflags (struct vnode *, int);
81 static int setutimes (struct vnode *, const struct timespec *, int);
82 static int	usermount = 0;	/* if 1, non-root can mount fs. */
83 
84 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
85 
86 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
87 
88 /*
89  * Virtual File System System Calls
90  */
91 
92 /*
93  * Mount a file system.
94  */
95 /*
96  * mount_args(char *type, char *path, int flags, caddr_t data)
97  */
98 /* ARGSUSED */
99 int
100 mount(struct mount_args *uap)
101 {
102 	struct thread *td = curthread;
103 	struct proc *p = td->td_proc;
104 	struct vnode *vp;
105 	struct mount *mp;
106 	struct vfsconf *vfsp;
107 	int error, flag = 0, flag2 = 0;
108 	struct vattr va;
109 	struct nameidata nd;
110 	char fstypename[MFSNAMELEN];
111 	lwkt_tokref vlock;
112 	lwkt_tokref ilock;
113 
114 	if (usermount == 0 && (error = suser(td)))
115 		return (error);
116 	/*
117 	 * Do not allow NFS export by non-root users.
118 	 */
119 	if (SCARG(uap, flags) & MNT_EXPORTED) {
120 		error = suser(td);
121 		if (error)
122 			return (error);
123 	}
124 	/*
125 	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
126 	 */
127 	if (suser(td))
128 		SCARG(uap, flags) |= MNT_NOSUID | MNT_NODEV;
129 	/*
130 	 * Get vnode to be covered
131 	 */
132 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
133 	    SCARG(uap, path), td);
134 	if ((error = namei(&nd)) != 0)
135 		return (error);
136 	NDFREE(&nd, NDF_ONLY_PNBUF);
137 	vp = nd.ni_vp;
138 	if (SCARG(uap, flags) & MNT_UPDATE) {
139 		if ((vp->v_flag & VROOT) == 0) {
140 			vput(vp);
141 			return (EINVAL);
142 		}
143 		mp = vp->v_mount;
144 		flag = mp->mnt_flag;
145 		flag2 = mp->mnt_kern_flag;
146 		/*
147 		 * We only allow the filesystem to be reloaded if it
148 		 * is currently mounted read-only.
149 		 */
150 		if ((SCARG(uap, flags) & MNT_RELOAD) &&
151 		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
152 			vput(vp);
153 			return (EOPNOTSUPP);	/* Needs translation */
154 		}
155 		/*
156 		 * Only root, or the user that did the original mount is
157 		 * permitted to update it.
158 		 */
159 		if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
160 		    (error = suser(td))) {
161 			vput(vp);
162 			return (error);
163 		}
164 		if (vfs_busy(mp, LK_NOWAIT, NULL, td)) {
165 			vput(vp);
166 			return (EBUSY);
167 		}
168 		lwkt_gettoken(&vlock, vp->v_interlock);
169 		if ((vp->v_flag & VMOUNT) != 0 ||
170 		    vp->v_mountedhere != NULL) {
171 			lwkt_reltoken(&vlock);
172 			vfs_unbusy(mp, td);
173 			vput(vp);
174 			return (EBUSY);
175 		}
176 		vp->v_flag |= VMOUNT;
177 		lwkt_reltoken(&vlock);
178 		mp->mnt_flag |=
179 		    SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
180 		VOP_UNLOCK(vp, NULL, 0, td);
181 		goto update;
182 	}
183 	/*
184 	 * If the user is not root, ensure that they own the directory
185 	 * onto which we are attempting to mount.
186 	 */
187 	if ((error = VOP_GETATTR(vp, &va, td)) ||
188 	    (va.va_uid != p->p_ucred->cr_uid &&
189 	     (error = suser(td)))) {
190 		vput(vp);
191 		return (error);
192 	}
193 	if ((error = vinvalbuf(vp, V_SAVE, td, 0, 0)) != 0) {
194 		vput(vp);
195 		return (error);
196 	}
197 	if (vp->v_type != VDIR) {
198 		vput(vp);
199 		return (ENOTDIR);
200 	}
201 	if ((error = copyinstr(SCARG(uap, type), fstypename, MFSNAMELEN, NULL)) != 0) {
202 		vput(vp);
203 		return (error);
204 	}
205 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
206 		if (!strcmp(vfsp->vfc_name, fstypename))
207 			break;
208 	if (vfsp == NULL) {
209 		linker_file_t lf;
210 
211 		/* Only load modules for root (very important!) */
212 		if ((error = suser(td)) != 0) {
213 			vput(vp);
214 			return error;
215 		}
216 		error = linker_load_file(fstypename, &lf);
217 		if (error || lf == NULL) {
218 			vput(vp);
219 			if (lf == NULL)
220 				error = ENODEV;
221 			return error;
222 		}
223 		lf->userrefs++;
224 		/* lookup again, see if the VFS was loaded */
225 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
226 			if (!strcmp(vfsp->vfc_name, fstypename))
227 				break;
228 		if (vfsp == NULL) {
229 			lf->userrefs--;
230 			linker_file_unload(lf);
231 			vput(vp);
232 			return (ENODEV);
233 		}
234 	}
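	/*
	 * Interlock against other mounts: mark the vnode VMOUNT while
	 * holding its interlock so that a concurrent mount attempt on the
	 * same directory fails with EBUSY.
	 */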
235 	lwkt_gettoken(&vlock, vp->v_interlock);
236 	if ((vp->v_flag & VMOUNT) != 0 ||
237 	    vp->v_mountedhere != NULL) {
238 		lwkt_reltoken(&vlock);
239 		vput(vp);
240 		return (EBUSY);
241 	}
242 	vp->v_flag |= VMOUNT;
243 	lwkt_reltoken(&vlock);
244 
245 	/*
246 	 * Allocate and initialize the filesystem.
247 	 */
248 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
249 	bzero((char *)mp, (u_long)sizeof(struct mount));
250 	TAILQ_INIT(&mp->mnt_nvnodelist);
251 	TAILQ_INIT(&mp->mnt_reservedvnlist);
252 	mp->mnt_nvnodelistsize = 0;
253 	lockinit(&mp->mnt_lock, 0, "vfslock", 0, LK_NOPAUSE);
254 	vfs_busy(mp, LK_NOWAIT, NULL, td);
255 	mp->mnt_op = vfsp->vfc_vfsops;
256 	mp->mnt_vfc = vfsp;
257 	vfsp->vfc_refcount++;
258 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
259 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
260 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
261 	mp->mnt_vnodecovered = vp;
262 	mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
263 	mp->mnt_iosize_max = DFLTPHYS;
264 	VOP_UNLOCK(vp, NULL, 0, td);
265 update:
266 	/*
267 	 * Set the mount level flags.
268 	 */
269 	if (SCARG(uap, flags) & MNT_RDONLY)
270 		mp->mnt_flag |= MNT_RDONLY;
271 	else if (mp->mnt_flag & MNT_RDONLY)
272 		mp->mnt_kern_flag |= MNTK_WANTRDWR;
273 	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
274 	    MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
275 	    MNT_NOSYMFOLLOW | MNT_IGNORE |
276 	    MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
277 	mp->mnt_flag |= SCARG(uap, flags) & (MNT_NOSUID | MNT_NOEXEC |
278 	    MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
279 	    MNT_NOSYMFOLLOW | MNT_IGNORE |
280 	    MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
281 	/*
282 	 * Mount the filesystem.
283 	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
284 	 * get.  No freeing of cn_pnbuf.
285 	 */
286 	error = VFS_MOUNT(mp, SCARG(uap, path), SCARG(uap, data), &nd, td);
287 	if (mp->mnt_flag & MNT_UPDATE) {
288 		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
289 			mp->mnt_flag &= ~MNT_RDONLY;
290 		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
291 		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
292 		if (error) {
293 			mp->mnt_flag = flag;
294 			mp->mnt_kern_flag = flag2;
295 		}
296 		vfs_unbusy(mp, td);
297 		lwkt_gettoken(&vlock, vp->v_interlock);
298 		vp->v_flag &= ~VMOUNT;
299 		lwkt_reltoken(&vlock);
300 		vrele(vp);
301 		return (error);
302 	}
303 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
304 	/*
305 	 * Put the new filesystem on the mount list after root.
306 	 */
307 	cache_purge(vp);
308 	if (!error) {
309 		lwkt_gettoken(&vlock, vp->v_interlock);
310 		vp->v_flag &= ~VMOUNT;
311 		vp->v_mountedhere = mp;
312 		lwkt_reltoken(&vlock);
313 		lwkt_gettoken(&ilock, &mountlist_token);
314 		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
315 		lwkt_reltoken(&ilock);
316 		checkdirs(vp);
317 		VOP_UNLOCK(vp, NULL, 0, td);
318 		error = vfs_allocate_syncvnode(mp);
319 		vfs_unbusy(mp, td);
320 		if ((error = VFS_START(mp, 0, td)) != 0)
321 			vrele(vp);
322 	} else {
323 		lwkt_gettoken(&vlock, vp->v_interlock);
324 		vp->v_flag &= ~VMOUNT;
325 		lwkt_reltoken(&vlock);
326 		mp->mnt_vfc->vfc_refcount--;
327 		vfs_unbusy(mp, td);
328 		free((caddr_t)mp, M_MOUNT);
329 		vput(vp);
330 	}
331 	return (error);
332 }
333 
334 /*
335  * Scan all active processes to see if any of them have a current
336  * or root directory onto which the new filesystem has just been
337  * mounted. If so, replace them with the new mount point.
338  */
339 static void
340 checkdirs(struct vnode *olddp)
341 {
342 	struct filedesc *fdp;
343 	struct vnode *newdp;
344 	struct proc *p;
345 
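	/*
	 * If nobody else holds a reference to the covered vnode it cannot
	 * be any process's current or root directory, so there is nothing
	 * to update.
	 */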
346 	if (olddp->v_usecount == 1)
347 		return;
348 	if (VFS_ROOT(olddp->v_mountedhere, &newdp))
349 		panic("mount: lost mount");
350 	FOREACH_PROC_IN_SYSTEM(p) {
351 		fdp = p->p_fd;
352 		if (fdp->fd_cdir == olddp) {
353 			vrele(fdp->fd_cdir);
354 			VREF(newdp);
355 			fdp->fd_cdir = newdp;
356 		}
357 		if (fdp->fd_rdir == olddp) {
358 			vrele(fdp->fd_rdir);
359 			VREF(newdp);
360 			fdp->fd_rdir = newdp;
361 		}
362 	}
363 	if (rootvnode == olddp) {
364 		vrele(rootvnode);
365 		VREF(newdp);
366 		rootvnode = newdp;
367 		vfs_cache_setroot(rootvnode);
368 	}
369 	vput(newdp);
370 }
371 
372 /*
373  * Unmount a file system.
374  *
375  * Note: unmount takes a path to the vnode mounted on as argument,
376  * not to the special device file (as it did historically).
377  */
378 /*
379  * umount_args(char *path, int flags)
380  */
381 /* ARGSUSED */
382 int
383 unmount(struct unmount_args *uap)
384 {
385 	struct thread *td = curthread;
386 	struct proc *p = td->td_proc;
387 	struct vnode *vp;
388 	struct mount *mp;
389 	int error;
390 	struct nameidata nd;
391 
392 	KKASSERT(p);
393 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
394 	    SCARG(uap, path), td);
395 	if ((error = namei(&nd)) != 0)
396 		return (error);
397 	vp = nd.ni_vp;
398 	NDFREE(&nd, NDF_ONLY_PNBUF);
399 	mp = vp->v_mount;
400 
401 	/*
402 	 * Only root, or the user that did the original mount is
403 	 * permitted to unmount this filesystem.
404 	 */
405 	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
406 	    (error = suser(td))) {
407 		vput(vp);
408 		return (error);
409 	}
410 
411 	/*
412 	 * Don't allow unmounting the root file system.
413 	 */
414 	if (mp->mnt_flag & MNT_ROOTFS) {
415 		vput(vp);
416 		return (EINVAL);
417 	}
418 
419 	/*
420 	 * Must be the root of the filesystem
421 	 */
422 	if ((vp->v_flag & VROOT) == 0) {
423 		vput(vp);
424 		return (EINVAL);
425 	}
426 	vput(vp);
427 	return (dounmount(mp, SCARG(uap, flags), td));
428 }
429 
430 /*
431  * Do the actual file system unmount.
432  */
433 int
434 dounmount(struct mount *mp, int flags, struct thread *td)
435 {
436 	struct vnode *coveredvp;
437 	int error;
438 	int async_flag;
439 	lwkt_tokref ilock;
440 
441 	lwkt_gettoken(&ilock, &mountlist_token);
442 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
443 		lwkt_reltoken(&ilock);
444 		return (EBUSY);
445 	}
446 	mp->mnt_kern_flag |= MNTK_UNMOUNT;
447 	/* Allow filesystems to detect that a forced unmount is in progress. */
448 	if (flags & MNT_FORCE)
449 		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
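	/*
	 * Drain out all users of the mount point.  Unless MNT_FORCE is
	 * given, LK_NOWAIT makes the drain fail with EBUSY instead of
	 * sleeping until activity subsides.
	 */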
450 	error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
451 	    ((flags & MNT_FORCE) ? 0 : LK_NOWAIT), &ilock, td);
452 	if (error) {
453 		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
454 		if (mp->mnt_kern_flag & MNTK_MWAIT)
455 			wakeup((caddr_t)mp);
456 		return (error);
457 	}
458 
459 	if (mp->mnt_flag & MNT_EXPUBLIC)
460 		vfs_setpublicfs(NULL, NULL, NULL);
461 
462 	vfs_msync(mp, MNT_WAIT);
463 	async_flag = mp->mnt_flag & MNT_ASYNC;
464 	mp->mnt_flag &=~ MNT_ASYNC;
465 	cache_purgevfs(mp);	/* remove cache entries for this file sys */
466 	if (mp->mnt_syncer != NULL)
467 		vrele(mp->mnt_syncer);
468 	if (((mp->mnt_flag & MNT_RDONLY) ||
469 	     (error = VFS_SYNC(mp, MNT_WAIT, td)) == 0) ||
470 	    (flags & MNT_FORCE))
471 		error = VFS_UNMOUNT(mp, flags, td);
472 	lwkt_gettokref(&ilock);
473 	if (error) {
474 		if (mp->mnt_syncer == NULL)
475 			vfs_allocate_syncvnode(mp);
476 		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
477 		mp->mnt_flag |= async_flag;
478 		lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
479 		    &ilock, td);
480 		if (mp->mnt_kern_flag & MNTK_MWAIT)
481 			wakeup((caddr_t)mp);
482 		return (error);
483 	}
484 	TAILQ_REMOVE(&mountlist, mp, mnt_list);
485 	if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
486 		coveredvp->v_mountedhere = NULL;
487 		vrele(coveredvp);
488 	}
489 	mp->mnt_vfc->vfc_refcount--;
490 	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
491 		panic("unmount: dangling vnode");
492 	lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &ilock, td);
493 	if (mp->mnt_kern_flag & MNTK_MWAIT)
494 		wakeup((caddr_t)mp);
495 	free((caddr_t)mp, M_MOUNT);
496 	return (0);
497 }
498 
499 /*
500  * Sync each mounted filesystem.
501  */
502 
503 #ifdef DEBUG
504 static int syncprt = 0;
505 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
506 #endif
507 
508 /* ARGSUSED */
509 int
510 sync(struct sync_args *uap)
511 {
512 	struct thread *td = curthread;
513 	struct mount *mp, *nmp;
514 	lwkt_tokref ilock;
515 	int asyncflag;
516 
517 	lwkt_gettoken(&ilock, &mountlist_token);
518 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
519 		if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
520 			nmp = TAILQ_NEXT(mp, mnt_list);
521 			continue;
522 		}
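		/*
		 * Temporarily clear MNT_ASYNC while flushing this filesystem
		 * so the writes are not deferred, then restore the previous
		 * setting.
		 */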
523 		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
524 			asyncflag = mp->mnt_flag & MNT_ASYNC;
525 			mp->mnt_flag &= ~MNT_ASYNC;
526 			vfs_msync(mp, MNT_NOWAIT);
527 			VFS_SYNC(mp, MNT_NOWAIT, td);
528 			mp->mnt_flag |= asyncflag;
529 		}
530 		lwkt_gettokref(&ilock);
531 		nmp = TAILQ_NEXT(mp, mnt_list);
532 		vfs_unbusy(mp, td);
533 	}
534 	lwkt_reltoken(&ilock);
535 #if 0
536 /*
537  * XXX don't call vfs_bufstats() yet because that routine
538  * was not imported in the Lite2 merge.
539  */
540 #ifdef DIAGNOSTIC
541 	if (syncprt)
542 		vfs_bufstats();
543 #endif /* DIAGNOSTIC */
544 #endif
545 	return (0);
546 }
547 
548 /* XXX PRISON: could be per prison flag */
549 static int prison_quotas;
550 #if 0
551 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
552 #endif
553 
554 /*
555  * quotactl_args(char *path, int cmd, int uid, caddr_t arg)
556  *
557  * Change filesystem quotas.
558  */
559 /* ARGSUSED */
560 int
561 quotactl(struct quotactl_args *uap)
562 {
563 	struct thread *td = curthread;
564 	struct proc *p = td->td_proc;
565 	struct mount *mp;
566 	int error;
567 	struct nameidata nd;
568 
569 	KKASSERT(p);
570 	if (p->p_ucred->cr_prison && !prison_quotas)
571 		return (EPERM);
572 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE,
573 	    SCARG(uap, path), td);
574 	if ((error = namei(&nd)) != 0)
575 		return (error);
576 	mp = nd.ni_vp->v_mount;
577 	NDFREE(&nd, NDF_ONLY_PNBUF);
578 	vrele(nd.ni_vp);
579 	return (VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
580 	    SCARG(uap, arg), td));
581 }
582 
583 int
584 kern_statfs(struct nameidata *nd, struct statfs *buf)
585 {
586 	struct thread *td = curthread;
587 	struct mount *mp;
588 	struct statfs *sp;
589 	int error;
590 
591 	error = namei(nd);
592 	if (error)
593 		return (error);
594 	mp = nd->ni_vp->v_mount;
595 	sp = &mp->mnt_stat;
596 	NDFREE(nd, NDF_ONLY_PNBUF);
597 	vrele(nd->ni_vp);
598 	error = VFS_STATFS(mp, sp, td);
599 	if (error)
600 		return (error);
601 	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
602 	bcopy(sp, buf, sizeof(*buf));
603 	/* Only root should have access to the fsids. */
604 	if (suser(td))
605 		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
606 	return (0);
607 }
608 
609 /*
610  * statfs_args(char *path, struct statfs *buf)
611  *
612  * Get filesystem statistics.
613  */
614 int
615 statfs(struct statfs_args *uap)
616 {
617 	struct thread *td = curthread;
618 	struct nameidata nd;
619 	struct statfs buf;
620 	int error;
621 
622 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
623 
624 	error = kern_statfs(&nd, &buf);
625 
626 	if (error == 0)
627 		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
628 	return (error);
629 }
630 
631 int
632 kern_fstatfs(int fd, struct statfs *buf)
633 {
634 	struct thread *td = curthread;
635 	struct proc *p = td->td_proc;
636 	struct file *fp;
637 	struct mount *mp;
638 	struct statfs *sp;
639 	int error;
640 
641 	KKASSERT(p);
642 	error = getvnode(p->p_fd, fd, &fp);
643 	if (error)
644 		return (error);
645 	mp = ((struct vnode *)fp->f_data)->v_mount;
646 	if (mp == NULL)
647 		return (EBADF);
648 	sp = &mp->mnt_stat;
649 	error = VFS_STATFS(mp, sp, td);
650 	if (error)
651 		return (error);
652 	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
653 	bcopy(sp, buf, sizeof(*buf));
654 	/* Only root should have access to the fsids. */
655 	if (suser(td))
656 		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
657 	return (0);
658 }
659 
660 /*
661  * fstatfs_args(int fd, struct statfs *buf)
662  *
663  * Get filesystem statistics.
664  */
665 int
666 fstatfs(struct fstatfs_args *uap)
667 {
668 	struct statfs buf;
669 	int error;
670 
671 	error = kern_fstatfs(uap->fd, &buf);
672 
673 	if (error == 0)
674 		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
675 	return (error);
676 }
677 
678 /*
679  * getfsstat_args(struct statfs *buf, long bufsize, int flags)
680  *
681  * Get statistics on all filesystems.
682  */
683 /* ARGSUSED */
684 int
685 getfsstat(struct getfsstat_args *uap)
686 {
687 	struct thread *td = curthread;
688 	struct mount *mp, *nmp;
689 	struct statfs *sp;
690 	caddr_t sfsp;
691 	lwkt_tokref ilock;
692 	long count, maxcount, error;
693 
694 	maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
695 	sfsp = (caddr_t)SCARG(uap, buf);
696 	count = 0;
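	/*
	 * Walk the mount list, copying one struct statfs per filesystem
	 * into the user buffer until it is full.  When no buffer is
	 * supplied only the number of mounted filesystems is returned.
	 */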
697 	lwkt_gettoken(&ilock, &mountlist_token);
698 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
699 		if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
700 			nmp = TAILQ_NEXT(mp, mnt_list);
701 			continue;
702 		}
703 		if (sfsp && count < maxcount) {
704 			sp = &mp->mnt_stat;
705 			/*
706 			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
707 			 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
708 			 * overrides MNT_WAIT.
709 			 */
710 			if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
711 			    (SCARG(uap, flags) & MNT_WAIT)) &&
712 			    (error = VFS_STATFS(mp, sp, td))) {
713 				lwkt_gettokref(&ilock);
714 				nmp = TAILQ_NEXT(mp, mnt_list);
715 				vfs_unbusy(mp, td);
716 				continue;
717 			}
718 			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
719 			error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
720 			if (error) {
721 				vfs_unbusy(mp, td);
722 				return (error);
723 			}
724 			sfsp += sizeof(*sp);
725 		}
726 		count++;
727 		lwkt_gettokref(&ilock);
728 		nmp = TAILQ_NEXT(mp, mnt_list);
729 		vfs_unbusy(mp, td);
730 	}
731 	lwkt_reltoken(&ilock);
732 	if (sfsp && count > maxcount)
733 		uap->sysmsg_result = maxcount;
734 	else
735 		uap->sysmsg_result = count;
736 	return (0);
737 }
738 
739 /*
740  * fchdir_args(int fd)
741  *
742  * Change current working directory to a given file descriptor.
743  */
744 /* ARGSUSED */
745 int
746 fchdir(struct fchdir_args *uap)
747 {
748 	struct thread *td = curthread;
749 	struct proc *p = td->td_proc;
750 	struct filedesc *fdp = p->p_fd;
751 	struct vnode *vp, *tdp;
752 	struct mount *mp;
753 	struct file *fp;
754 	int error;
755 
756 	if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
757 		return (error);
758 	vp = (struct vnode *)fp->f_data;
759 	VREF(vp);
760 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
761 	if (vp->v_type != VDIR)
762 		error = ENOTDIR;
763 	else
764 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, td);
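	/*
	 * If the directory is the mount point of another filesystem,
	 * descend to the root vnode of the filesystem mounted there,
	 * repeating for stacked mounts.
	 */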
765 	while (!error && (mp = vp->v_mountedhere) != NULL) {
766 		if (vfs_busy(mp, 0, NULL, td))
767 			continue;
768 		error = VFS_ROOT(mp, &tdp);
769 		vfs_unbusy(mp, td);
770 		if (error)
771 			break;
772 		vput(vp);
773 		vp = tdp;
774 	}
775 	if (error) {
776 		vput(vp);
777 		return (error);
778 	}
779 	VOP_UNLOCK(vp, NULL, 0, td);
780 	vrele(fdp->fd_cdir);
781 	fdp->fd_cdir = vp;
782 	return (0);
783 }
784 
785 int
786 kern_chdir(struct nameidata *nd)
787 {
788 	struct thread *td = curthread;
789 	struct proc *p = td->td_proc;
790 	struct filedesc *fdp = p->p_fd;
791 	int error;
792 
793 	error = change_dir(nd, td);
794 	if (error)
795 		return (error);
796 	NDFREE(nd, NDF_ONLY_PNBUF);
797 	vrele(fdp->fd_cdir);
798 	fdp->fd_cdir = nd->ni_vp;
799 	return (0);
800 }
801 
802 /*
803  * chdir_args(char *path)
804  *
805  * Change current working directory (``.'').
806  */
807 int
808 chdir(struct chdir_args *uap)
809 {
810 	struct thread *td = curthread;
811 	struct nameidata nd;
812 	int error;
813 
814 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
815 	    uap->path, td);
816 
817 	error = kern_chdir(&nd);
818 
819 	return (error);
820 }
821 
822 /*
823  * Helper function for raised chroot(2) security function:  Refuse if
824  * Helper function for the raised chroot(2) security level:  refuse the
825  * request if any open file descriptors refer to directories.
826 static int
827 chroot_refuse_vdir_fds(struct filedesc *fdp)
829 {
830 	struct vnode *vp;
831 	struct file *fp;
832 	int error;
833 	int fd;
834 
835 	for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
836 		error = getvnode(fdp, fd, &fp);
837 		if (error)
838 			continue;
839 		vp = (struct vnode *)fp->f_data;
840 		if (vp->v_type != VDIR)
841 			continue;
842 		return(EPERM);
843 	}
844 	return (0);
845 }
846 
847 /*
848  * This sysctl determines if we will allow a process to chroot(2) if it
849  * has a directory open:
850  *	0: disallowed for all processes.
851  *	1: allowed for processes that were not already chroot(2)'ed.
852  *	2: allowed for all processes.
853  */
854 
855 static int chroot_allow_open_directories = 1;
856 
857 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
858      &chroot_allow_open_directories, 0, "");
859 
860 /*
861  * chroot_args(char *path)
862  *
863  * Change notion of root (``/'') directory.
864  */
865 /* ARGSUSED */
866 int
867 chroot(struct chroot_args *uap)
868 {
869 	struct thread *td = curthread;
870 	struct proc *p = td->td_proc;
871 	struct filedesc *fdp = p->p_fd;
872 	int error;
873 	struct nameidata nd;
874 
875 	KKASSERT(p);
876 	error = suser_cred(p->p_ucred, PRISON_ROOT);
877 	if (error)
878 		return (error);
879 	if (chroot_allow_open_directories == 0 ||
880 	    (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode))
881 		error = chroot_refuse_vdir_fds(fdp);
882 	if (error)
883 		return (error);
884 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
885 	    SCARG(uap, path), td);
886 	if ((error = change_dir(&nd, td)) != 0)
887 		return (error);
888 	NDFREE(&nd, NDF_ONLY_PNBUF);
889 	vrele(fdp->fd_rdir);
890 	fdp->fd_rdir = nd.ni_vp;
891 	if (!fdp->fd_jdir) {
892 		fdp->fd_jdir = nd.ni_vp;
893 		VREF(fdp->fd_jdir);
894 	}
895 	return (0);
896 }
897 
898 /*
899  * Common routine for chroot and chdir.
900  */
901 static int
902 change_dir(struct nameidata *ndp, struct thread *td)
903 {
904 	struct vnode *vp;
905 	int error;
906 
907 	error = namei(ndp);
908 	if (error)
909 		return (error);
910 	vp = ndp->ni_vp;
911 	if (vp->v_type != VDIR)
912 		error = ENOTDIR;
913 	else
914 		error = VOP_ACCESS(vp, VEXEC, ndp->ni_cnd.cn_cred, td);
915 	if (error)
916 		vput(vp);
917 	else
918 		VOP_UNLOCK(vp, NULL, 0, td);
919 	return (error);
920 }
921 
922 int
923 kern_open(struct nameidata *nd, int oflags, int mode, int *res)
924 {
925 	struct thread *td = curthread;
926 	struct proc *p = td->td_proc;
927 	struct filedesc *fdp = p->p_fd;
928 	struct file *fp;
929 	struct vnode *vp;
930 	int cmode, flags;
931 	struct file *nfp;
932 	int type, indx, error;
933 	struct flock lf;
934 
935 	if ((oflags & O_ACCMODE) == O_ACCMODE)
936 		return (EINVAL);
937 	flags = FFLAGS(oflags);
938 	error = falloc(p, &nfp, &indx);
939 	if (error)
940 		return (error);
941 	fp = nfp;
942 	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
943 	p->p_dupfd = -indx - 1;			/* XXX check for fdopen */
944 	/*
945 	 * Bump the ref count to prevent another process from closing
946 	 * the descriptor while we are blocked in vn_open()
947 	 */
948 	fhold(fp);
949 	error = vn_open(nd, flags, cmode);
950 	if (error) {
951 		/*
952 		 * release our own reference
953 		 */
954 		fdrop(fp, td);
955 
956 		/*
957 		 * handle special fdopen() case.  bleh.  dupfdopen() is
958 		 * responsible for dropping the old contents of ofiles[indx]
959 		 * if it succeeds.
960 		 */
961 		if ((error == ENODEV || error == ENXIO) &&
962 		    p->p_dupfd >= 0 &&			/* XXX from fdopen */
963 		    (error =
964 			dupfdopen(fdp, indx, p->p_dupfd, flags, error)) == 0) {
965 			*res = indx;
966 			return (0);
967 		}
968 		/*
969 		 * Clean up the descriptor, but only if another thread hadn't
970 		 * replaced or closed it.
971 		 */
972 		if (fdp->fd_ofiles[indx] == fp) {
973 			fdp->fd_ofiles[indx] = NULL;
974 			fdrop(fp, td);
975 		}
976 
977 		if (error == ERESTART)
978 			error = EINTR;
979 		return (error);
980 	}
981 	p->p_dupfd = 0;
982 	NDFREE(nd, NDF_ONLY_PNBUF);
983 	vp = nd->ni_vp;
984 
985 	/*
986 	 * There should be 2 references on the file, one from the descriptor
987 	 * table, and one for us.
988 	 *
989 	 * Handle the case where someone closed the file (via its file
990 	 * descriptor) while we were blocked.  The end result should look
991 	 * like opening the file succeeded but it was immediately closed.
992 	 */
993 	if (fp->f_count == 1) {
994 		KASSERT(fdp->fd_ofiles[indx] != fp,
995 		    ("Open file descriptor lost all refs"));
996 		VOP_UNLOCK(vp, NULL, 0, td);
997 		vn_close(vp, flags & FMASK, td);
998 		fdrop(fp, td);
999 		*res = indx;
1000 		return 0;
1001 	}
1002 
1003 	fp->f_data = (caddr_t)vp;
1004 	fp->f_flag = flags & FMASK;
1005 	fp->f_ops = &vnops;
1006 	fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
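	/*
	 * O_EXLOCK and O_SHLOCK request a flock-style advisory lock on the
	 * vnode as part of the open; FNONBLOCK controls whether we wait
	 * for it.
	 */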
1007 	if (flags & (O_EXLOCK | O_SHLOCK)) {
1008 		lf.l_whence = SEEK_SET;
1009 		lf.l_start = 0;
1010 		lf.l_len = 0;
1011 		if (flags & O_EXLOCK)
1012 			lf.l_type = F_WRLCK;
1013 		else
1014 			lf.l_type = F_RDLCK;
1015 		type = F_FLOCK;
1016 		if ((flags & FNONBLOCK) == 0)
1017 			type |= F_WAIT;
1018 		VOP_UNLOCK(vp, NULL, 0, td);
1019 		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1020 			/*
1021 			 * lock request failed.  Normally close the descriptor
1022 			 * but handle the case where someone might have dup()d
1023 			 * it when we weren't looking.  One reference is
1024 			 * owned by the descriptor array, the other by us.
1025 			 */
1026 			if (fdp->fd_ofiles[indx] == fp) {
1027 				fdp->fd_ofiles[indx] = NULL;
1028 				fdrop(fp, td);
1029 			}
1030 			fdrop(fp, td);
1031 			return (error);
1032 		}
1033 		vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1034 		fp->f_flag |= FHASLOCK;
1035 	}
1036 	/* assert that vn_open created a backing object if one is needed */
1037 	KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
1038 		("open: vmio vnode has no backing object after vn_open"));
1039 	VOP_UNLOCK(vp, NULL, 0, td);
1040 
1041 	/*
1042 	 * release our private reference, leaving the one associated with the
1043 	 * descriptor table intact.
1044 	 */
1045 	fdrop(fp, td);
1046 	*res = indx;
1047 	return (0);
1048 }
1049 
1050 /*
1051  * open_args(char *path, int flags, int mode)
1052  *
1053  * Check permissions, allocate an open file structure,
1054  * and call the device open routine if any.
1055  */
1056 int
1057 open(struct open_args *uap)
1058 {
1059 	struct thread *td = curthread;
1060 	struct nameidata nd;
1061 	int error;
1062 
1063 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
1064 
1065 	error = kern_open(&nd, uap->flags, uap->mode, &uap->sysmsg_result);
1066 
1067 	return (error);
1068 }
1069 
1070 int
1071 kern_mknod(struct nameidata *nd, int mode, int dev)
1072 {
1073 	struct thread *td = curthread;
1074 	struct proc *p = td->td_proc;
1075 	struct vnode *vp;
1076 	struct vattr vattr;
1077 	int error;
1078 	int whiteout = 0;
1079 
1080 	KKASSERT(p);
1081 
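	/*
	 * Character and block device nodes may only be created by the
	 * real superuser; other node types also allow a jailed root
	 * (PRISON_ROOT).
	 */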
1082 	switch (mode & S_IFMT) {
1083 	case S_IFCHR:
1084 	case S_IFBLK:
1085 		error = suser(td);
1086 		break;
1087 	default:
1088 		error = suser_cred(p->p_ucred, PRISON_ROOT);
1089 		break;
1090 	}
1091 	if (error)
1092 		return (error);
1093 	bwillwrite();
1094 	error = namei(nd);
1095 	if (error)
1096 		return (error);
1097 	vp = nd->ni_vp;
1098 	if (vp != NULL)
1099 		error = EEXIST;
1100 	else {
1101 		VATTR_NULL(&vattr);
1102 		vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1103 		vattr.va_rdev = dev;
1104 		whiteout = 0;
1105 
1106 		switch (mode & S_IFMT) {
1107 		case S_IFMT:	/* used by badsect to flag bad sectors */
1108 			vattr.va_type = VBAD;
1109 			break;
1110 		case S_IFCHR:
1111 			vattr.va_type = VCHR;
1112 			break;
1113 		case S_IFBLK:
1114 			vattr.va_type = VBLK;
1115 			break;
1116 		case S_IFWHT:
1117 			whiteout = 1;
1118 			break;
1119 		default:
1120 			error = EINVAL;
1121 			break;
1122 		}
1123 	}
1124 	if (error == 0) {
1125 		VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1126 		if (whiteout)
1127 			error = VOP_WHITEOUT(nd->ni_dvp, NCPNULL,
1128 			    &nd->ni_cnd, NAMEI_CREATE);
1129 		else {
1130 			error = VOP_MKNOD(nd->ni_dvp, NCPNULL, &nd->ni_vp,
1131 			    &nd->ni_cnd, &vattr);
1132 			if (error == 0)
1133 				vput(nd->ni_vp);
1134 		}
1135 		NDFREE(nd, NDF_ONLY_PNBUF);
1136 		vput(nd->ni_dvp);
1137 	} else {
1138 		NDFREE(nd, NDF_ONLY_PNBUF);
1139 		if (nd->ni_dvp == vp)
1140 			vrele(nd->ni_dvp);
1141 		else
1142 			vput(nd->ni_dvp);
1143 		if (vp)
1144 			vrele(vp);
1145 	}
1146 	ASSERT_VOP_UNLOCKED(nd->ni_dvp, "mknod");
1147 	ASSERT_VOP_UNLOCKED(nd->ni_vp, "mknod");
1148 	return (error);
1149 }
1150 
1151 /*
1152  * mknod_args(char *path, int mode, int dev)
1153  *
1154  * Create a special file.
1155  */
1156 int
1157 mknod(struct mknod_args *uap)
1158 {
1159 	struct thread *td = curthread;
1160 	struct nameidata nd;
1161 	int error;
1162 
1163 	NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1164 	    td);
1165 
1166 	error = kern_mknod(&nd, uap->mode, uap->dev);
1167 
1168 	return (error);
1169 }
1170 
1171 int
1172 kern_mkfifo(struct nameidata *nd, int mode)
1173 {
1174 	struct thread *td = curthread;
1175 	struct proc *p = td->td_proc;
1176 	struct vattr vattr;
1177 	int error;
1178 
1179 	bwillwrite();
1180 	error = namei(nd);
1181 	if (error)
1182 		return (error);
1183 	if (nd->ni_vp != NULL) {
1184 		NDFREE(nd, NDF_ONLY_PNBUF);
1185 		if (nd->ni_dvp == nd->ni_vp)
1186 			vrele(nd->ni_dvp);
1187 		else
1188 			vput(nd->ni_dvp);
1189 		vrele(nd->ni_vp);
1190 		return (EEXIST);
1191 	}
1192 	VATTR_NULL(&vattr);
1193 	vattr.va_type = VFIFO;
1194 	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1195 	VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1196 	error = VOP_MKNOD(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd, &vattr);
1197 	if (error == 0)
1198 		vput(nd->ni_vp);
1199 	NDFREE(nd, NDF_ONLY_PNBUF);
1200 	vput(nd->ni_dvp);
1201 	return (error);
1202 }
1203 
1204 /*
1205  * mkfifo_args(char *path, int mode)
1206  *
1207  * Create a named pipe.
1208  */
1209 int
1210 mkfifo(struct mkfifo_args *uap)
1211 {
1212 	struct thread *td = curthread;
1213 	struct nameidata nd;
1214 	int error;
1215 
1216 	NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1217 	    td);
1218 
1219 	error = kern_mkfifo(&nd, uap->mode);
1220 
1221 	return (error);
1222 }
1223 
1224 int
1225 kern_link(struct nameidata *nd, struct nameidata *linknd)
1226 {
1227 	struct thread *td = curthread;
1228 	struct proc *p = td->td_proc;
1229 	struct vnode *vp;
1230 	int error;
1231 
1232 	bwillwrite();
1233 	error = namei(nd);
1234 	if (error)
1235 		return (error);
1236 	NDFREE(nd, NDF_ONLY_PNBUF);
1237 	vp = nd->ni_vp;
1238 	if (vp->v_type == VDIR)
1239 		error = EPERM;		/* POSIX */
1240 	else {
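		/*
		 * Look up the name for the new link; it must not already
		 * exist.
		 */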
1241 		error = namei(linknd);
1242 		if (error == 0) {
1243 			if (linknd->ni_vp != NULL) {
1244 				if (linknd->ni_vp)
1245 					vrele(linknd->ni_vp);
1246 				error = EEXIST;
1247 			} else {
1248 				VOP_LEASE(linknd->ni_dvp, td, p->p_ucred,
1249 				    LEASE_WRITE);
1250 				VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1251 				error = VOP_LINK(linknd->ni_dvp, NCPNULL, vp,
1252 				    &linknd->ni_cnd);
1253 			}
1254 			NDFREE(linknd, NDF_ONLY_PNBUF);
1255 			if (linknd->ni_dvp == linknd->ni_vp)
1256 				vrele(linknd->ni_dvp);
1257 			else
1258 				vput(linknd->ni_dvp);
1259 			ASSERT_VOP_UNLOCKED(linknd->ni_dvp, "link");
1260 			ASSERT_VOP_UNLOCKED(linknd->ni_vp, "link");
1261 		}
1262 	}
1263 	vrele(vp);
1264 	return (error);
1265 }
1266 
1267 /*
1268  * link_args(char *path, char *link)
1269  *
1270  * Make a hard file link.
1271  */
1272 int
1273 link(struct link_args *uap)
1274 {
1275 	struct thread *td = curthread;
1276 	struct nameidata nd, linknd;
1277 	int error;
1278 
1279 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_NOOBJ, UIO_USERSPACE,
1280 	    uap->path, td);
1281 	NDINIT(&linknd, NAMEI_CREATE, CNP_LOCKPARENT | CNP_NOOBJ,
1282 	    UIO_USERSPACE, uap->link, td);
1283 
1284 	error = kern_link(&nd, &linknd);
1285 
1286 	return (error);
1287 }
1288 
1289 int
1290 kern_symlink(char *path, struct nameidata *nd)
1291 {
1292 	struct thread *td = curthread;
1293 	struct proc *p = td->td_proc;
1294 	struct vattr vattr;
1295 	int error;
1296 
1297 	bwillwrite();
1298 	error = namei(nd);
1299 	if (error)
1300 		return (error);
1301 	if (nd->ni_vp) {
1302 		NDFREE(nd, NDF_ONLY_PNBUF);
1303 		if (nd->ni_dvp == nd->ni_vp)
1304 			vrele(nd->ni_dvp);
1305 		else
1306 			vput(nd->ni_dvp);
1307 		vrele(nd->ni_vp);
1308 		return (EEXIST);
1309 	}
1310 	VATTR_NULL(&vattr);
1311 	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
1312 	VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1313 	error = VOP_SYMLINK(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd,
1314 	    &vattr, path);
1315 	NDFREE(nd, NDF_ONLY_PNBUF);
1316 	if (error == 0)
1317 		vput(nd->ni_vp);
1318 	vput(nd->ni_dvp);
1319 	ASSERT_VOP_UNLOCKED(nd->ni_dvp, "symlink");
1320 	ASSERT_VOP_UNLOCKED(nd->ni_vp, "symlink");
1321 
1322 	return (error);
1323 }
1324 
1325 /*
1326  * symlink_args(char *path, char *link)
1327  *
1328  * Make a symbolic link.
1329  */
1330 int
1331 symlink(struct symlink_args *uap)
1332 {
1333 	struct thread *td = curthread;
1334 	struct nameidata nd;
1335 	char *path;
1336 	int error;
1337 
1338 	path = zalloc(namei_zone);
1339 	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1340 	if (error)
1341 		return (error);
1342 	NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT | CNP_NOOBJ, UIO_USERSPACE,
1343 	    uap->link, td);
1344 
1345 	error = kern_symlink(path, &nd);
1346 
1347 	zfree(namei_zone, path);
1348 	return (error);
1349 }
1350 
1351 /*
1352  * undelete_args(char *path)
1353  *
1354  * Delete a whiteout from the filesystem.
1355  */
1356 /* ARGSUSED */
1357 int
1358 undelete(struct undelete_args *uap)
1359 {
1360 	struct thread *td = curthread;
1361 	struct proc *p = td->td_proc;
1362 	int error;
1363 	struct nameidata nd;
1364 
1365 	bwillwrite();
1366 	NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT | CNP_DOWHITEOUT, UIO_USERSPACE,
1367 	    SCARG(uap, path), td);
1368 	error = namei(&nd);
1369 	if (error)
1370 		return (error);
1371 
1372 	if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & CNP_ISWHITEOUT)) {
1373 		NDFREE(&nd, NDF_ONLY_PNBUF);
1374 		if (nd.ni_dvp == nd.ni_vp)
1375 			vrele(nd.ni_dvp);
1376 		else
1377 			vput(nd.ni_dvp);
1378 		if (nd.ni_vp)
1379 			vrele(nd.ni_vp);
1380 		return (EEXIST);
1381 	}
1382 
1383 	VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1384 	error = VOP_WHITEOUT(nd.ni_dvp, NCPNULL, &nd.ni_cnd, NAMEI_DELETE);
1385 	NDFREE(&nd, NDF_ONLY_PNBUF);
1386 	vput(nd.ni_dvp);
1387 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
1388 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
1389 	return (error);
1390 }
1391 
1392 int
1393 kern_unlink(struct nameidata *nd)
1394 {
1395 	struct thread *td = curthread;
1396 	struct proc *p = td->td_proc;
1397 	struct vnode *vp;
1398 	int error;
1399 
1400 	bwillwrite();
1401 	error = namei(nd);
1402 	if (error)
1403 		return (error);
1404 	vp = nd->ni_vp;
1405 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1406 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1407 
1408 	if (vp->v_type == VDIR)
1409 		error = EPERM;		/* POSIX */
1410 	else {
1411 		/*
1412 		 * The root of a mounted filesystem cannot be deleted.
1413 		 *
1414 		 * XXX: can this only be a VDIR case?
1415 		 */
1416 		if (vp->v_flag & VROOT)
1417 			error = EBUSY;
1418 	}
1419 
1420 	if (error == 0) {
1421 		VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1422 		error = VOP_REMOVE(nd->ni_dvp, NCPNULL, vp, &nd->ni_cnd);
1423 	}
1424 	NDFREE(nd, NDF_ONLY_PNBUF);
1425 	if (nd->ni_dvp == vp)
1426 		vrele(nd->ni_dvp);
1427 	else
1428 		vput(nd->ni_dvp);
1429 	if (vp != NULLVP)
1430 		vput(vp);
1431 	ASSERT_VOP_UNLOCKED(nd->ni_dvp, "unlink");
1432 	ASSERT_VOP_UNLOCKED(nd->ni_vp, "unlink");
1433 	return (error);
1434 }
1435 
1436 /*
1437  * unlink_args(char *path)
1438  *
1439  * Delete a name from the filesystem.
1440  */
1441 int
1442 unlink(struct unlink_args *uap)
1443 {
1444 	struct thread *td = curthread;
1445 	struct nameidata nd;
1446 	int error;
1447 
1448 	NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1449 	    td);
1450 
1451 	error = kern_unlink(&nd);
1452 
1453 	return (error);
1454 }
1455 
1456 int
1457 kern_lseek(int fd, off_t offset, int whence, off_t *res)
1458 {
1459 	struct thread *td = curthread;
1460 	struct proc *p = td->td_proc;
1461 	struct filedesc *fdp = p->p_fd;
1462 	struct file *fp;
1463 	struct vattr vattr;
1464 	int error;
1465 
1466 	if (fd >= fdp->fd_nfiles ||
1467 	    (fp = fdp->fd_ofiles[fd]) == NULL)
1468 		return (EBADF);
1469 	if (fp->f_type != DTYPE_VNODE)
1470 		return (ESPIPE);
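	/*
	 * L_INCR, L_XTND and L_SET are the traditional names for
	 * SEEK_CUR, SEEK_END and SEEK_SET.
	 */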
1471 	switch (whence) {
1472 	case L_INCR:
1473 		fp->f_offset += offset;
1474 		break;
1475 	case L_XTND:
1476 		error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, td);
1477 		if (error)
1478 			return (error);
1479 		fp->f_offset = offset + vattr.va_size;
1480 		break;
1481 	case L_SET:
1482 		fp->f_offset = offset;
1483 		break;
1484 	default:
1485 		return (EINVAL);
1486 	}
1487 	*res = fp->f_offset;
1488 	return (0);
1489 }
1490 
1491 /*
1492  * lseek_args(int fd, int pad, off_t offset, int whence)
1493  *
1494  * Reposition read/write file offset.
1495  */
1496 int
1497 lseek(struct lseek_args *uap)
1498 {
1499 	int error;
1500 
1501 	error = kern_lseek(uap->fd, uap->offset, uap->whence,
1502 	    &uap->sysmsg_offset);
1503 
1504 	return (error);
1505 }
1506 
1507 int
1508 kern_access(struct nameidata *nd, int aflags)
1509 {
1510 	struct thread *td = curthread;
1511 	struct proc *p = td->td_proc;
1512 	struct ucred *cred, *tmpcred;
1513 	struct vnode *vp;
1514 	int error, flags;
1515 
1516 	cred = p->p_ucred;
1517 	/*
1518 	 * Create and modify a temporary credential instead of one that
1519 	 * is potentially shared.  This could also mess up socket
1520 	 * buffer accounting which can run in an interrupt context.
1521 	 */
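	/*
	 * access() checks against the real uid and gid, so those are the
	 * ids loaded into the temporary credential.
	 */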
1522 	tmpcred = crdup(cred);
1523 	tmpcred->cr_uid = p->p_ucred->cr_ruid;
1524 	tmpcred->cr_groups[0] = p->p_ucred->cr_rgid;
1525 	p->p_ucred = tmpcred;
1526 	nd->ni_cnd.cn_cred = tmpcred;
1527 	error = namei(nd);
1528 	if (error)
1529 		goto out1;
1530 	vp = nd->ni_vp;
1531 
1532 	/* Flags == 0 means only check for existence. */
1533 	if (aflags) {
1534 		flags = 0;
1535 		if (aflags & R_OK)
1536 			flags |= VREAD;
1537 		if (aflags & W_OK)
1538 			flags |= VWRITE;
1539 		if (aflags & X_OK)
1540 			flags |= VEXEC;
1541 		if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1542 			error = VOP_ACCESS(vp, flags, tmpcred, td);
1543 	}
1544 	NDFREE(nd, NDF_ONLY_PNBUF);
1545 	vput(vp);
1546 out1:
1547 	p->p_ucred = cred;
1548 	crfree(tmpcred);
1549 	return (error);
1550 }
1551 
1552 /*
1553  * access_args(char *path, int flags)
1554  *
1555  * Check access permissions.
1556  */
1557 int
1558 access(struct access_args *uap)
1559 {
1560 	struct thread *td = curthread;
1561 	struct nameidata nd;
1562 	int error;
1563 
1564 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1565 	    UIO_USERSPACE, uap->path, td);
1566 
1567 	error = kern_access(&nd, uap->flags);
1568 
1569 	return (error);
1570 }
1571 
1572 int
1573 kern_stat(struct nameidata *nd, struct stat *st)
1574 {
1575 	struct thread *td = curthread;
1576 	int error;
1577 
1578 	error = namei(nd);
1579 	if (error)
1580 		return (error);
1581 	error = vn_stat(nd->ni_vp, st, td);
1582 	NDFREE(nd, NDF_ONLY_PNBUF);
1583 	vput(nd->ni_vp);
1584 	return (error);
1585 }
1586 
1587 /*
1588  * stat_args(char *path, struct stat *ub)
1589  *
1590  * Get file status; this version follows links.
1591  */
1592 int
1593 stat(struct stat_args *uap)
1594 {
1595 	struct thread *td = curthread;
1596 	struct nameidata nd;
1597 	struct stat st;
1598 	int error;
1599 
1600 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1601 	    UIO_USERSPACE, uap->path, td);
1602 
1603 	error = kern_stat(&nd, &st);
1604 
1605 	if (error == 0)
1606 		error = copyout(&st, uap->ub, sizeof(*uap->ub));
1607 	return (error);
1608 }
1609 
1610 /*
1611  * lstat_args(char *path, struct stat *ub)
1612  *
1613  * Get file status; this version does not follow links.
1614  */
1615 int
1616 lstat(struct lstat_args *uap)
1617 {
1618 	struct thread *td = curthread;
1619 	struct nameidata nd;
1620 	struct stat st;
1621 	int error;
1622 
1623 	NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ,
1624 	    UIO_USERSPACE, SCARG(uap, path), td);
1625 
1626 	error = kern_stat(&nd, &st);
1627 
1628 	if (error == 0)
1629 		error = copyout(&st, uap->ub, sizeof(*uap->ub));
1630 	return (error);
1631 }
1632 
1633 void
1634 cvtnstat(struct stat *sb, struct nstat *nsb)
1637 {
1638 	nsb->st_dev = sb->st_dev;
1639 	nsb->st_ino = sb->st_ino;
1640 	nsb->st_mode = sb->st_mode;
1641 	nsb->st_nlink = sb->st_nlink;
1642 	nsb->st_uid = sb->st_uid;
1643 	nsb->st_gid = sb->st_gid;
1644 	nsb->st_rdev = sb->st_rdev;
1645 	nsb->st_atimespec = sb->st_atimespec;
1646 	nsb->st_mtimespec = sb->st_mtimespec;
1647 	nsb->st_ctimespec = sb->st_ctimespec;
1648 	nsb->st_size = sb->st_size;
1649 	nsb->st_blocks = sb->st_blocks;
1650 	nsb->st_blksize = sb->st_blksize;
1651 	nsb->st_flags = sb->st_flags;
1652 	nsb->st_gen = sb->st_gen;
1653 	nsb->st_qspare[0] = sb->st_qspare[0];
1654 	nsb->st_qspare[1] = sb->st_qspare[1];
1655 }
1656 
1657 /*
1658  * nstat_args(char *path, struct nstat *ub)
1659  */
1660 /* ARGSUSED */
1661 int
1662 nstat(struct nstat_args *uap)
1663 {
1664 	struct thread *td = curthread;
1665 	struct stat sb;
1666 	struct nstat nsb;
1667 	int error;
1668 	struct nameidata nd;
1669 
1670 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1671 	    UIO_USERSPACE, SCARG(uap, path), td);
1672 	if ((error = namei(&nd)) != 0)
1673 		return (error);
1674 	NDFREE(&nd, NDF_ONLY_PNBUF);
1675 	error = vn_stat(nd.ni_vp, &sb, td);
1676 	vput(nd.ni_vp);
1677 	if (error)
1678 		return (error);
1679 	cvtnstat(&sb, &nsb);
1680 	error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1681 	return (error);
1682 }
1683 
1684 /*
1685  * nlstat_args(char *path, struct nstat *ub)
1686  *
1687  * Get file status; this version does not follow links.
1688  */
1689 /* ARGSUSED */
1690 int
1691 nlstat(struct nlstat_args *uap)
1692 {
1693 	struct thread *td = curthread;
1694 	int error;
1695 	struct vnode *vp;
1696 	struct stat sb;
1697 	struct nstat nsb;
1698 	struct nameidata nd;
1699 
1700 	NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ,
1701 	    UIO_USERSPACE, SCARG(uap, path), td);
1702 	if ((error = namei(&nd)) != 0)
1703 		return (error);
1704 	vp = nd.ni_vp;
1705 	NDFREE(&nd, NDF_ONLY_PNBUF);
1706 	error = vn_stat(vp, &sb, td);
1707 	vput(vp);
1708 	if (error)
1709 		return (error);
1710 	cvtnstat(&sb, &nsb);
1711 	error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1712 	return (error);
1713 }
1714 
1715 /*
1716  * pathconf_args(char *path, int name)
1717  *
1718  * Get configurable pathname variables.
1719  */
1720 /* ARGSUSED */
1721 int
1722 pathconf(struct pathconf_args *uap)
1723 {
1724 	struct thread *td = curthread;
1725 	int error;
1726 	struct nameidata nd;
1727 
1728 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1729 	    UIO_USERSPACE, SCARG(uap, path), td);
1730 	if ((error = namei(&nd)) != 0)
1731 		return (error);
1732 	NDFREE(&nd, NDF_ONLY_PNBUF);
1733 	error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), uap->sysmsg_fds);
1734 	vput(nd.ni_vp);
1735 	return (error);
1736 }
1737 
1738 /*
1739  * XXX: daver
1740  * kern_readlink isn't properly split yet.  There is a copyin buried
1741  * in VOP_READLINK().
1742  */
1743 int
1744 kern_readlink(struct nameidata *nd, char *buf, int count, int *res)
1745 {
1746 	struct thread *td = curthread;
1747 	struct proc *p = td->td_proc;
1748 	struct vnode *vp;
1749 	struct iovec aiov;
1750 	struct uio auio;
1751 	int error;
1752 
1753 	error = namei(nd);
1754 	if (error)
1755 		return (error);
1756 	NDFREE(nd, NDF_ONLY_PNBUF);
1757 	vp = nd->ni_vp;
1758 	if (vp->v_type != VLNK)
1759 		error = EINVAL;
1760 	else {
1761 		aiov.iov_base = buf;
1762 		aiov.iov_len = count;
1763 		auio.uio_iov = &aiov;
1764 		auio.uio_iovcnt = 1;
1765 		auio.uio_offset = 0;
1766 		auio.uio_rw = UIO_READ;
1767 		auio.uio_segflg = UIO_USERSPACE;
1768 		auio.uio_td = td;
1769 		auio.uio_resid = count;
1770 		error = VOP_READLINK(vp, &auio, p->p_ucred);
1771 	}
1772 	vput(vp);
1773 	*res = count - auio.uio_resid;
1774 	return (error);
1775 }
1776 
1777 /*
1778  * readlink_args(char *path, char *buf, int count)
1779  *
1780  * Return target name of a symbolic link.
1781  */
1782 int
1783 readlink(struct readlink_args *uap)
1784 {
1785 	struct thread *td = curthread;
1786 	struct nameidata nd;
1787 	int error;
1788 
1789 	NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ, UIO_USERSPACE,
1790 	    uap->path, td);
1791 
1792 	error = kern_readlink(&nd, uap->buf, uap->count,
1793 	    &uap->sysmsg_result);
1794 
1795 	return (error);
1796 }
1797 
1798 static int
1799 setfflags(struct vnode *vp, int flags)
1800 {
1801 	struct thread *td = curthread;
1802 	struct proc *p = td->td_proc;
1803 	int error;
1804 	struct vattr vattr;
1805 
1806 	/*
1807 	 * Prevent non-root users from setting flags on devices.  When
1808 	 * a device is reused, users can retain ownership of the device
1809 	 * if they are allowed to set flags and programs assume that
1810 	 * chown can't fail when done as root.
1811 	 */
1812 	if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
1813 	    ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
1814 		return (error);
1815 
1816 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1817 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1818 	VATTR_NULL(&vattr);
1819 	vattr.va_flags = flags;
1820 	error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1821 	VOP_UNLOCK(vp, NULL, 0, td);
1822 	return (error);
1823 }
1824 
1825 /*
1826  * chflags_args(char *path, int flags)
1827  *
1828  * Change flags of a file given a path name.
1829  */
1830 /* ARGSUSED */
1831 int
1832 chflags(struct chflags_args *uap)
1833 {
1834 	struct thread *td = curthread;
1835 	int error;
1836 	struct nameidata nd;
1837 
1838 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE,
1839 	    SCARG(uap, path), td);
1840 	if ((error = namei(&nd)) != 0)
1841 		return (error);
1842 	NDFREE(&nd, NDF_ONLY_PNBUF);
1843 	error = setfflags(nd.ni_vp, SCARG(uap, flags));
1844 	vrele(nd.ni_vp);
1845 	return error;
1846 }
1847 
1848 /*
1849  * fchflags_args(int fd, int flags)
1850  *
1851  * Change flags of a file given a file descriptor.
1852  */
1853 /* ARGSUSED */
1854 int
1855 fchflags(struct fchflags_args *uap)
1856 {
1857 	struct thread *td = curthread;
1858 	struct proc *p = td->td_proc;
1859 	struct file *fp;
1860 	int error;
1861 
1862 	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1863 		return (error);
1864 	return setfflags((struct vnode *) fp->f_data, SCARG(uap, flags));
1865 }
1866 
1867 static int
1868 setfmode(struct vnode *vp, int mode)
1869 {
1870 	struct thread *td = curthread;
1871 	struct proc *p = td->td_proc;
1872 	int error;
1873 	struct vattr vattr;
1874 
1875 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1876 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1877 	VATTR_NULL(&vattr);
1878 	vattr.va_mode = mode & ALLPERMS;
1879 	error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1880 	VOP_UNLOCK(vp, NULL, 0, td);
1881 	return error;
1882 }
1883 
1884 int
1885 kern_chmod(struct nameidata *nd, int mode)
1886 {
1887 	int error;
1888 
1889 	error = namei(nd);
1890 	if (error)
1891 		return (error);
1892 	NDFREE(nd, NDF_ONLY_PNBUF);
1893 	error = setfmode(nd->ni_vp, mode);
1894 	vrele(nd->ni_vp);
1895 	return error;
1896 }
1897 
1898 /*
1899  * chmod_args(char *path, int mode)
1900  *
1901  * Change mode of a file given path name.
1902  */
1903 /* ARGSUSED */
1904 int
1905 chmod(struct chmod_args *uap)
1906 {
1907 	struct thread *td = curthread;
1908 	struct nameidata nd;
1909 	int error;
1910 
1911 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
1912 
1913 	error = kern_chmod(&nd, uap->mode);
1914 
1915 	return (error);
1916 }
1917 
1918 /*
1919  * lchmod_args(char *path, int mode)
1920  *
1921  * Change mode of a file given path name (don't follow links.)
1922  */
1923 /* ARGSUSED */
1924 int
1925 lchmod(struct lchmod_args *uap)
1926 {
1927 	struct thread *td = curthread;
1928 	int error;
1929 	struct nameidata nd;
1930 
1931 	NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, SCARG(uap, path), td);
1932 	if ((error = namei(&nd)) != 0)
1933 		return (error);
1934 	NDFREE(&nd, NDF_ONLY_PNBUF);
1935 	error = setfmode(nd.ni_vp, SCARG(uap, mode));
1936 	vrele(nd.ni_vp);
1937 	return error;
1938 }
1939 
1940 /*
1941  * fchmod_args(int fd, int mode)
1942  *
1943  * Change mode of a file given a file descriptor.
1944  */
1945 /* ARGSUSED */
1946 int
1947 fchmod(struct fchmod_args *uap)
1948 {
1949 	struct thread *td = curthread;
1950 	struct proc *p = td->td_proc;
1951 	struct file *fp;
1952 	int error;
1953 
1954 	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1955 		return (error);
1956 	return setfmode((struct vnode *)fp->f_data, SCARG(uap, mode));
1957 }
1958 
1959 static int
1960 setfown(struct vnode *vp, uid_t uid, gid_t gid)
1961 {
1962 	struct thread *td = curthread;
1963 	struct proc *p = td->td_proc;
1964 	int error;
1965 	struct vattr vattr;
1966 
1967 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1968 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1969 	VATTR_NULL(&vattr);
1970 	vattr.va_uid = uid;
1971 	vattr.va_gid = gid;
1972 	error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1973 	VOP_UNLOCK(vp, NULL, 0, td);
1974 	return error;
1975 }
1976 
1977 int
1978 kern_chown(struct nameidata *nd, int uid, int gid)
1979 {
1980 	int error;
1981 
1982 	error = namei(nd);
1983 	if (error)
1984 		return (error);
1985 	NDFREE(nd, NDF_ONLY_PNBUF);
1986 	error = setfown(nd->ni_vp, uid, gid);
1987 	vrele(nd->ni_vp);
1988 	return (error);
1989 }
1990 
1991 /*
1992  * chown_args(char *path, int uid, int gid)
1993  *
1994  * Set ownership given a path name.
1995  */
1996 int
1997 chown(struct chown_args *uap)
1998 {
1999 	struct thread *td = curthread;
2000 	struct nameidata nd;
2001 	int error;
2002 
2003 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2004 
2005 	error = kern_chown(&nd, uap->uid, uap->gid);
2006 
2007 	return (error);
2008 }
2009 
2010 /*
2011  * lchown_args(char *path, int uid, int gid)
2012  *
2013  * Set ownership given a path name, do not cross symlinks.
2014  */
2015 int
2016 lchown(struct lchown_args *uap)
2017 {
2018 	struct thread *td = curthread;
2019 	int error;
2020 	struct nameidata nd;
2021 
2022 	NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, uap->path, td);
2023 
2024 	error = kern_chown(&nd, uap->uid, uap->gid);
2025 
2026 	return (error);
2027 }
2028 
2029 /*
2030  * fchown_args(int fd, int uid, int gid)
2031  *
2032  * Set ownership given a file descriptor.
2033  */
2034 /* ARGSUSED */
2035 int
2036 fchown(struct fchown_args *uap)
2037 {
2038 	struct thread *td = curthread;
2039 	struct proc *p = td->td_proc;
2040 	struct file *fp;
2041 	int error;
2042 
2043 	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2044 		return (error);
2045 	return setfown((struct vnode *)fp->f_data,
2046 		SCARG(uap, uid), SCARG(uap, gid));
2047 }
2048 
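/*
 * Convert an optional pair of user timevals into the timespec pair
 * used by setutimes().  A NULL tvp selects the current time for both
 * the access and modification timestamps.
 */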
2049 static int
2050 getutimes(const struct timeval *tvp, struct timespec *tsp)
2051 {
2052 	struct timeval tv[2];
2053 
2054 	if (tvp == NULL) {
2055 		microtime(&tv[0]);
2056 		TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2057 		tsp[1] = tsp[0];
2058 	} else {
2059 		TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2060 		TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
2061 	}
2062 	return 0;
2063 }
2064 
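/*
 * Apply access/modification times to a vnode.  The nullflag argument
 * sets VA_UTIMES_NULL so the filesystem can apply the relaxed
 * permission check used for utimes(path, NULL).
 */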
2065 static int
2066 setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2067 {
2068 	struct thread *td = curthread;
2069 	struct proc *p = td->td_proc;
2070 	int error;
2071 	struct vattr vattr;
2072 
2073 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2074 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2075 	VATTR_NULL(&vattr);
2076 	vattr.va_atime = ts[0];
2077 	vattr.va_mtime = ts[1];
2078 	if (nullflag)
2079 		vattr.va_vaflags |= VA_UTIMES_NULL;
2080 	error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2081 	VOP_UNLOCK(vp, NULL, 0, td);
2082 	return error;
2083 }
2084 
2085 int
2086 kern_utimes(struct nameidata *nd, struct timeval *tptr)
2087 {
2088 	struct timespec ts[2];
2089 	int error;
2090 
2091 	error = getutimes(tptr, ts);
2092 	if (error)
2093 		return (error);
2094 	error = namei(nd);
2095 	if (error)
2096 		return (error);
2097 	NDFREE(nd, NDF_ONLY_PNBUF);
2098 	error = setutimes(nd->ni_vp, ts, tptr == NULL);
2099 	vrele(nd->ni_vp);
2100 	return (error);
2101 }
2102 
2103 /*
2104  * utimes_args(char *path, struct timeval *tptr)
2105  *
2106  * Set the access and modification times of a file.
2107  */
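/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * standard utimes(2) prototype from <sys/time.h>.  A NULL pointer sets both
 * timestamps to the current time (the VA_UTIMES_NULL case in setutimes());
 * otherwise the array supplies the access and modification times.
 *
 *	struct timeval tv[2];
 *
 *	tv[0].tv_sec = 1000000000; tv[0].tv_usec = 0;	access time
 *	tv[1] = tv[0];					modification time
 *	utimes("/tmp/f", tv);
 *	utimes("/tmp/f", NULL);		"touch" to the current time
 */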
2108 int
2109 utimes(struct utimes_args *uap)
2110 {
2111 	struct thread *td = curthread;
2112 	struct timeval tv[2];
2113 	struct nameidata nd;
2114 	int error;
2115 
2116 	if (uap->tptr) {
2117 		error = copyin(uap->tptr, tv, sizeof(tv));
2118 		if (error)
2119 			return (error);
2120 	}
2121 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2122 
2123 	error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2124 
2125 	return (error);
2126 }
2127 
2128 /*
2129  * lutimes_args(char *path, struct timeval *tptr)
2130  *
2131  * Set the access and modification times of a file, do not cross symlinks.
2132  */
2133 int
2134 lutimes(struct lutimes_args *uap)
2135 {
2136 	struct thread *td = curthread;
2137 	struct timeval tv[2];
2138 	struct nameidata nd;
2139 	int error;
2140 
2141 	if (uap->tptr) {
2142 		error = copyin(uap->tptr, tv, sizeof(tv));
2143 		if (error)
2144 			return (error);
2145 	}
2146 	NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, uap->path, td);
2147 
2148 	error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2149 
2150 	return (error);
2151 }
2152 
2153 int
2154 kern_futimes(int fd, struct timeval *tptr)
2155 {
2156 	struct thread *td = curthread;
2157 	struct proc *p = td->td_proc;
2158 	struct timespec ts[2];
2159 	struct file *fp;
2160 	int error;
2161 
2162 	error = getutimes(tptr, ts);
2163 	if (error)
2164 		return (error);
2165 	error = getvnode(p->p_fd, fd, &fp);
2166 	if (error)
2167 		return (error);
2168 	error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
2169 	return (error);
2170 }
2171 
2172 /*
2173  * futimes_args(int fd, struct timeval *tptr)
2174  *
2175  * Set the access and modification times of a file.
2176  */
2177 int
2178 futimes(struct futimes_args *uap)
2179 {
2180 	struct timeval tv[2];
2181 	int error;
2182 
2183 	if (uap->tptr) {
2184 		error = copyin(uap->tptr, tv, sizeof(tv));
2185 		if (error)
2186 			return (error);
2187 	}
2188 
2189 	error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
2190 
2191 	return (error);
2192 }
2193 
2194 int
2195 kern_truncate(struct nameidata* nd, off_t length)
2196 {
2197 	struct thread *td = curthread;
2198 	struct proc *p = td->td_proc;
2199 	struct vnode *vp;
2200 	struct vattr vattr;
2201 	int error;
2202 
2203 	if (length < 0)
2204 		return(EINVAL);
2205 	if ((error = namei(nd)) != 0)
2206 		return (error);
2207 	vp = nd->ni_vp;
2208 	NDFREE(nd, NDF_ONLY_PNBUF);
2209 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2210 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2211 	if (vp->v_type == VDIR)
2212 		error = EISDIR;
2213 	else if ((error = vn_writechk(vp)) == 0 &&
2214 	    (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, td)) == 0) {
2215 		VATTR_NULL(&vattr);
2216 		vattr.va_size = length;
2217 		error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2218 	}
2219 	vput(vp);
2220 	return (error);
2221 }
2222 
2223 /*
2224  * truncate(char *path, int pad, off_t length)
2225  *
2226  * Truncate a file given its path name.
2227  */
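/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * standard truncate(2)/ftruncate(2) prototypes from <unistd.h>; the pad
 * argument above exists only to align the 64-bit length in the syscall
 * argument list and is not visible to callers.  Negative lengths fail with
 * EINVAL and directories with EISDIR, as in kern_truncate() and
 * kern_ftruncate().
 *
 *	truncate("/tmp/f", 0);		discard the file's contents by path
 *	ftruncate(fd, 4096);		resize an open file to 4KB
 */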
2228 int
2229 truncate(struct truncate_args *uap)
2230 {
2231 	struct thread *td = curthread;
2232 	struct nameidata nd;
2233 	int error;
2234 
2235 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2236 
2237 	error = kern_truncate(&nd, uap->length);
2238 
2239 	return error;
2240 }
2241 
2242 int
2243 kern_ftruncate(int fd, off_t length)
2244 {
2245 	struct thread *td = curthread;
2246 	struct proc *p = td->td_proc;
2247 	struct vattr vattr;
2248 	struct vnode *vp;
2249 	struct file *fp;
2250 	int error;
2251 
2252 	if (length < 0)
2253 		return(EINVAL);
2254 	if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2255 		return (error);
2256 	if ((fp->f_flag & FWRITE) == 0)
2257 		return (EINVAL);
2258 	vp = (struct vnode *)fp->f_data;
2259 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2260 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2261 	if (vp->v_type == VDIR)
2262 		error = EISDIR;
2263 	else if ((error = vn_writechk(vp)) == 0) {
2264 		VATTR_NULL(&vattr);
2265 		vattr.va_size = length;
2266 		error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
2267 	}
2268 	VOP_UNLOCK(vp, NULL, 0, td);
2269 	return (error);
2270 }
2271 
2272 /*
2273  * ftruncate_args(int fd, int pad, off_t length)
2274  *
2275  * Truncate a file given a file descriptor.
2276  */
2277 int
2278 ftruncate(struct ftruncate_args *uap)
2279 {
2280 	int error;
2281 
2282 	error = kern_ftruncate(uap->fd, uap->length);
2283 
2284 	return (error);
2285 }
2286 
2287 /*
2288  * fsync(int fd)
2289  *
2290  * Sync an open file.
2291  */
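/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * standard write(2)/fsync(2) prototypes from <unistd.h>.  fsync() does not
 * return until the vnode's dirty pages and buffers have been flushed
 * (VOP_FSYNC() below is called with MNT_WAIT).
 *
 *	if (write(fd, buf, len) == (ssize_t)len && fsync(fd) == 0)
 *		;	data has been pushed to the device
 */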
2292 /* ARGSUSED */
2293 int
2294 fsync(struct fsync_args *uap)
2295 {
2296 	struct thread *td = curthread;
2297 	struct proc *p = td->td_proc;
2298 	struct vnode *vp;
2299 	struct file *fp;
2300 	vm_object_t obj;
2301 	int error;
2302 
2303 	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2304 		return (error);
2305 	vp = (struct vnode *)fp->f_data;
2306 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2307 	if (VOP_GETVOBJECT(vp, &obj) == 0)
2308 		vm_object_page_clean(obj, 0, 0, 0);
2309 	if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) == 0 &&
2310 	    vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2311 	    bioops.io_fsync)
2312 		error = (*bioops.io_fsync)(vp);
2313 	VOP_UNLOCK(vp, NULL, 0, td);
2314 	return (error);
2315 }
2316 
2317 int
2318 kern_rename(struct nameidata *fromnd, struct nameidata *tond)
2319 {
2320 	struct thread *td = curthread;
2321 	struct proc *p = td->td_proc;
2322 	struct vnode *tvp, *fvp, *tdvp;
2323 	int error;
2324 
2325 	bwillwrite();
2326 	error = namei(fromnd);
2327 	if (error)
2328 		return (error);
2329 	fvp = fromnd->ni_vp;
2330 	if (fromnd->ni_vp->v_type == VDIR)
2331 		tond->ni_cnd.cn_flags |= CNP_WILLBEDIR;
2332 	error = namei(tond);
2333 	if (error) {
2334 		/* Translate error code for rename("dir1", "dir2/."). */
2335 		if (error == EISDIR && fvp->v_type == VDIR)
2336 			error = EINVAL;
2337 		NDFREE(fromnd, NDF_ONLY_PNBUF);
2338 		vrele(fromnd->ni_dvp);
2339 		vrele(fvp);
2340 		goto out1;
2341 	}
2342 	tdvp = tond->ni_dvp;
2343 	tvp = tond->ni_vp;
2344 	if (tvp != NULL) {
2345 		if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
2346 			error = ENOTDIR;
2347 			goto out;
2348 		} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
2349 			error = EISDIR;
2350 			goto out;
2351 		}
2352 	}
2353 	if (fvp == tdvp)
2354 		error = EINVAL;
2355 	/*
2356 	 * If the source is the same as the destination (that is, if they
2357 	 * are links to the same vnode), then there is nothing to do.
2358 	 */
2359 	if (fvp == tvp)
2360 		error = -1;
2361 out:
2362 	if (!error) {
2363 		VOP_LEASE(tdvp, td, p->p_ucred, LEASE_WRITE);
2364 		if (fromnd->ni_dvp != tdvp) {
2365 			VOP_LEASE(fromnd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2366 		}
2367 		if (tvp) {
2368 			VOP_LEASE(tvp, td, p->p_ucred, LEASE_WRITE);
2369 		}
2370 		error = VOP_RENAME(fromnd->ni_dvp, NCPNULL, fromnd->ni_vp,
2371 		    &fromnd->ni_cnd, tond->ni_dvp, NCPNULL, tond->ni_vp,
2372 		    &tond->ni_cnd);
2373 		NDFREE(fromnd, NDF_ONLY_PNBUF);
2374 		NDFREE(tond, NDF_ONLY_PNBUF);
2375 	} else {
2376 		NDFREE(fromnd, NDF_ONLY_PNBUF);
2377 		NDFREE(tond, NDF_ONLY_PNBUF);
2378 		if (tdvp == tvp)
2379 			vrele(tdvp);
2380 		else
2381 			vput(tdvp);
2382 		if (tvp)
2383 			vput(tvp);
2384 		vrele(fromnd->ni_dvp);
2385 		vrele(fvp);
2386 	}
2387 	vrele(tond->ni_startdir);
2388 	ASSERT_VOP_UNLOCKED(fromnd->ni_dvp, "rename");
2389 	ASSERT_VOP_UNLOCKED(fromnd->ni_vp, "rename");
2390 	ASSERT_VOP_UNLOCKED(tond->ni_dvp, "rename");
2391 	ASSERT_VOP_UNLOCKED(tond->ni_vp, "rename");
2392 out1:
2393 	if (fromnd->ni_startdir)
2394 		vrele(fromnd->ni_startdir);
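	/*
	 * A -1 "error" is the marker set above for a source and target
	 * that are links to the same vnode; report success without
	 * having renamed anything.
	 */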
2395 	if (error == -1)
2396 		return (0);
2397 	return (error);
2398 }
2399 
2400 /*
2401  * rename_args(char *from, char *to)
2402  *
2403  * Rename files.  Source and destination must either both be directories,
2404  * or both not be directories.  If target is a directory, it must be empty.
2405  */
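/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * standard rename(2) prototype from <stdio.h>.  The ENOTDIR/EISDIR checks
 * in kern_rename() enforce the directory/non-directory pairing described
 * above; an existing directory target must also be empty.
 *
 *	rename("old.txt", "new.txt");	replaces new.txt if it exists
 *	rename("dir1", "dir2");		fails if dir2 exists and is not empty
 */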
2406 int
2407 rename(struct rename_args *uap)
2408 {
2409 	struct thread *td = curthread;
2410 	struct nameidata fromnd, tond;
2411 	int error;
2412 
2413 	NDINIT(&fromnd, NAMEI_DELETE, CNP_WANTPARENT | CNP_SAVESTART,
2414 		UIO_USERSPACE, uap->from, td);
2415 	NDINIT(&tond, NAMEI_RENAME,
2416 	    CNP_LOCKPARENT | CNP_LOCKLEAF | CNP_NOCACHE |
2417 	     CNP_SAVESTART | CNP_NOOBJ,
2418 	    UIO_USERSPACE, uap->to, td);
2419 
2420 	error = kern_rename(&fromnd, &tond);
2421 
2422 	return (error);
2423 }
2424 
2425 int
2426 kern_mkdir(struct nameidata *nd, int mode)
2427 {
2428 	struct thread *td = curthread;
2429 	struct proc *p = td->td_proc;
2430 	struct vnode *vp;
2431 	struct vattr vattr;
2432 	int error;
2433 
2434 	bwillwrite();
2435 	nd->ni_cnd.cn_flags |= CNP_WILLBEDIR;
2436 	error = namei(nd);
2437 	if (error)
2438 		return (error);
2439 	vp = nd->ni_vp;
2440 	if (vp) {
2441 		NDFREE(nd, NDF_ONLY_PNBUF);
2442 		if (nd->ni_dvp == vp)
2443 			vrele(nd->ni_dvp);
2444 		else
2445 			vput(nd->ni_dvp);
2446 		vrele(vp);
2447 		return (EEXIST);
2448 	}
2449 	VATTR_NULL(&vattr);
2450 	vattr.va_type = VDIR;
2451 	vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2452 	VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2453 	error = VOP_MKDIR(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd,
2454 	    &vattr);
2455 	NDFREE(nd, NDF_ONLY_PNBUF);
2456 	vput(nd->ni_dvp);
2457 	if (error == 0)
2458 		vput(nd->ni_vp);
2459 	ASSERT_VOP_UNLOCKED(nd->ni_dvp, "mkdir");
2460 	ASSERT_VOP_UNLOCKED(nd->ni_vp, "mkdir");
2461 	return (error);
2462 }
2463 
2464 /*
2465  * mkdir_args(char *path, int mode)
2466  *
2467  * Make a directory file.
2468  */
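/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * standard mkdir(2) prototype from <sys/stat.h>.  kern_mkdir() masks the
 * requested mode with the process umask (fd_cmask), so with a umask of 022
 * the directory below ends up with mode 0755.
 *
 *	if (mkdir("/tmp/newdir", 0777) == -1 && errno == EEXIST)
 *		;	already present, kern_mkdir() returned EEXIST
 */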
2469 /* ARGSUSED */
2470 int
2471 mkdir(struct mkdir_args *uap)
2472 {
2473 	struct thread *td = curthread;
2474 	struct nameidata nd;
2475 	int error;
2476 
2477 	NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
2478 	    td);
2479 
2480 	error = kern_mkdir(&nd, uap->mode);
2481 
2482 	return (error);
2483 }
2484 
2485 int
2486 kern_rmdir(struct nameidata *nd)
2487 {
2488 	struct thread *td = curthread;
2489 	struct proc *p = td->td_proc;
2490 	struct vnode *vp;
2491 	int error;
2492 
2493 	bwillwrite();
2494 	error = namei(nd);
2495 	if (error)
2496 		return (error);
2497 	vp = nd->ni_vp;
2498 	if (vp->v_type != VDIR) {
2499 		error = ENOTDIR;
2500 		goto out;
2501 	}
2502 	/*
2503 	 * No rmdir "." please.
2504 	 */
2505 	if (nd->ni_dvp == vp) {
2506 		error = EINVAL;
2507 		goto out;
2508 	}
2509 	/*
2510 	 * The root of a mounted filesystem cannot be deleted.
2511 	 */
2512 	if (vp->v_flag & VROOT)
2513 		error = EBUSY;
2514 	else {
2515 		VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2516 		VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2517 		error = VOP_RMDIR(nd->ni_dvp, NCPNULL, nd->ni_vp,
2518 		    &nd->ni_cnd);
2519 	}
2520 out:
2521 	NDFREE(nd, NDF_ONLY_PNBUF);
2522 	if (nd->ni_dvp == vp)
2523 		vrele(nd->ni_dvp);
2524 	else
2525 		vput(nd->ni_dvp);
2526 	if (vp != NULLVP)
2527 		vput(vp);
2528 	ASSERT_VOP_UNLOCKED(nd->ni_dvp, "rmdir");
2529 	ASSERT_VOP_UNLOCKED(nd->ni_vp, "rmdir");
2530 	return (error);
2531 }
2532 
2533 /*
2534  * rmdir_args(char *path)
2535  *
2536  * Remove a directory file.
2537  */
2538 /* ARGSUSED */
2539 int
2540 rmdir(struct rmdir_args *uap)
2541 {
2542 	struct thread *td = curthread;
2543 	struct nameidata nd;
2544 	int error;
2545 
2546 	NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT | CNP_LOCKLEAF,
2547 	    UIO_USERSPACE, uap->path, td);
2548 
2549 	error = kern_rmdir(&nd);
2550 
2551 	return (error);
2552 }
2553 
2554 int
2555 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res)
2556 {
2557 	struct thread *td = curthread;
2558 	struct proc *p = td->td_proc;
2559 	struct vnode *vp;
2560 	struct file *fp;
2561 	struct uio auio;
2562 	struct iovec aiov;
2563 	long loff;
2564 	int error, eofflag;
2565 
2566 	if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2567 		return (error);
2568 	if ((fp->f_flag & FREAD) == 0)
2569 		return (EBADF);
2570 	vp = (struct vnode *)fp->f_data;
2571 unionread:
2572 	if (vp->v_type != VDIR)
2573 		return (EINVAL);
2574 	aiov.iov_base = buf;
2575 	aiov.iov_len = count;
2576 	auio.uio_iov = &aiov;
2577 	auio.uio_iovcnt = 1;
2578 	auio.uio_rw = UIO_READ;
2579 	auio.uio_segflg = UIO_USERSPACE;
2580 	auio.uio_td = td;
2581 	auio.uio_resid = count;
2582 	/* vn_lock(vp, NULL, LK_SHARED | LK_RETRY, td); */
2583 	vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2584 	loff = auio.uio_offset = fp->f_offset;
2585 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2586 	fp->f_offset = auio.uio_offset;
2587 	VOP_UNLOCK(vp, NULL, 0, td);
2588 	if (error)
2589 		return (error);
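	/*
	 * Reading nothing may simply mean we are at the end of the top
	 * layer of a union mount; let union_dircheckp redirect us or
	 * drop down to the covered vnode and retry the read.
	 */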
2590 	if (count == auio.uio_resid) {
2591 		if (union_dircheckp) {
2592 			error = union_dircheckp(td, &vp, fp);
2593 			if (error == -1)
2594 				goto unionread;
2595 			if (error)
2596 				return (error);
2597 		}
2598 		if ((vp->v_flag & VROOT) &&
2599 		    (vp->v_mount->mnt_flag & MNT_UNION)) {
2600 			struct vnode *tvp = vp;
2601 			vp = vp->v_mount->mnt_vnodecovered;
2602 			VREF(vp);
2603 			fp->f_data = (caddr_t) vp;
2604 			fp->f_offset = 0;
2605 			vrele(tvp);
2606 			goto unionread;
2607 		}
2608 	}
2609 	if (basep) {
2610 		*basep = loff;
2611 	}
2612 	*res = count - auio.uio_resid;
2613 	return (error);
2614 }
2615 
2616 /*
2617  * getdirentries_args(int fd, char *buf, u_int count, long *basep)
2618  *
2619  * Read a block of directory entries in a file system independent format.
2620  */
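/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * classic BSD getdirentries(2) interface.  The buffer is filled with a
 * packed run of struct dirent records that must be walked via d_reclen,
 * and *basep receives the seek offset of the block just read.
 *
 *	char buf[4096], *cp;
 *	long base;
 *	int n;
 *
 *	while ((n = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
 *		for (cp = buf; cp < buf + n;
 *		    cp += ((struct dirent *)cp)->d_reclen) {
 *			... ((struct dirent *)cp)->d_name ...
 *		}
 *	}
 */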
2621 int
2622 getdirentries(struct getdirentries_args *uap)
2623 {
2624 	long base;
2625 	int error;
2626 
2627 	error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
2628 	    &uap->sysmsg_result);
2629 
2630 	if (error == 0)
2631 		error = copyout(&base, uap->basep, sizeof(*uap->basep));
2632 	return (error);
2633 }
2634 
2635 /*
2636  * getdents_args(int fd, char *buf, size_t count)
2637  */
2638 int
2639 getdents(struct getdents_args *uap)
2640 {
2641 	int error;
2642 
2643 	error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
2644 	    &uap->sysmsg_result);
2645 
2646 	return (error);
2647 }
2648 
2649 /*
2650  * umask(int newmask)
2651  *
2652  * Set the mode mask for creation of filesystem nodes.
2653  *
2654  * MP SAFE
2655  */
2656 int
2657 umask(struct umask_args *uap)
2658 {
2659 	struct thread *td = curthread;
2660 	struct proc *p = td->td_proc;
2661 	struct filedesc *fdp;
2662 
2663 	fdp = p->p_fd;
2664 	uap->sysmsg_result = fdp->fd_cmask;
2665 	fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
2666 	return (0);
2667 }
2668 
2669 /*
2670  * revoke(char *path)
2671  *
2672  * Void all references to the file by ripping the underlying
2673  * filesystem away from the vnode.
2674  */
2675 /* ARGSUSED */
2676 int
2677 revoke(struct revoke_args *uap)
2678 {
2679 	struct thread *td = curthread;
2680 	struct proc *p = td->td_proc;
2681 	struct vnode *vp;
2682 	struct vattr vattr;
2683 	int error;
2684 	struct nameidata nd;
2685 
2686 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2687 	if ((error = namei(&nd)) != 0)
2688 		return (error);
2689 	vp = nd.ni_vp;
2690 	NDFREE(&nd, NDF_ONLY_PNBUF);
2691 	if (vp->v_type != VCHR && vp->v_type != VBLK) {
2692 		error = EINVAL;
2693 		goto out;
2694 	}
2695 	if ((error = VOP_GETATTR(vp, &vattr, td)) != 0)
2696 		goto out;
2697 	if (p->p_ucred->cr_uid != vattr.va_uid &&
2698 	    (error = suser_cred(p->p_ucred, PRISON_ROOT)))
2699 		goto out;
2700 	if (vcount(vp) > 1)
2701 		VOP_REVOKE(vp, REVOKEALL);
2702 out:
2703 	vrele(vp);
2704 	return (error);
2705 }
2706 
2707 /*
2708  * Convert a user file descriptor to a kernel file entry.
2709  */
2710 int
2711 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
2712 {
2713 	struct file *fp;
2714 
2715 	if ((u_int)fd >= fdp->fd_nfiles ||
2716 	    (fp = fdp->fd_ofiles[fd]) == NULL)
2717 		return (EBADF);
2718 	if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
2719 		return (EINVAL);
2720 	*fpp = fp;
2721 	return (0);
2722 }
2723 /*
2724  * getfh_args(char *fname, fhandle_t *fhp)
2725  *
2726  * Get (NFS) file handle
2727  */
2728 int
2729 getfh(struct getfh_args *uap)
2730 {
2731 	struct thread *td = curthread;
2732 	struct nameidata nd;
2733 	fhandle_t fh;
2734 	struct vnode *vp;
2735 	int error;
2736 
2737 	/*
2738 	 * Must be super user
2739 	 */
2740 	error = suser(td);
2741 	if (error)
2742 		return (error);
2743 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE, uap->fname, td);
2744 	error = namei(&nd);
2745 	if (error)
2746 		return (error);
2747 	NDFREE(&nd, NDF_ONLY_PNBUF);
2748 	vp = nd.ni_vp;
2749 	bzero(&fh, sizeof(fh));
2750 	fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
2751 	error = VFS_VPTOFH(vp, &fh.fh_fid);
2752 	vput(vp);
2753 	if (error)
2754 		return (error);
2755 	error = copyout(&fh, uap->fhp, sizeof (fh));
2756 	return (error);
2757 }
2758 
2759 /*
2760  * fhopen_args(const struct fhandle *u_fhp, int flags)
2761  *
2762  * syscall for the rpc.lockd to use to translate an NFS file handle into
2763  * an open descriptor.
2764  *
2765  * warning: do not remove the suser() call or this becomes one giant
2766  * security hole.
2767  */
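/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * BSD getfh(2)/fhopen(2) interface and a privileged caller (both syscalls
 * insist on suser()).  A daemon such as rpc.lockd can stash the handle and
 * reopen the file later without holding a descriptor:
 *
 *	fhandle_t fh;
 *	int fd;
 *
 *	if (getfh("/export/some/file", &fh) == 0)
 *		fd = fhopen(&fh, O_RDWR);
 */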
2768 int
2769 fhopen(struct fhopen_args *uap)
2770 {
2771 	struct thread *td = curthread;
2772 	struct proc *p = td->td_proc;
2773 	struct mount *mp;
2774 	struct vnode *vp;
2775 	struct fhandle fhp;
2776 	struct vattr vat;
2777 	struct vattr *vap = &vat;
2778 	struct flock lf;
2779 	struct file *fp;
2780 	struct filedesc *fdp = p->p_fd;
2781 	int fmode, mode, error, type;
2782 	struct file *nfp;
2783 	int indx;
2784 
2785 	/*
2786 	 * Must be super user
2787 	 */
2788 	error = suser(td);
2789 	if (error)
2790 		return (error);
2791 
2792 	fmode = FFLAGS(SCARG(uap, flags));
2793 	/* why not allow a non-read/write open for our lockd? */
2794 	if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
2795 		return (EINVAL);
2796 	error = copyin(SCARG(uap, u_fhp), &fhp, sizeof(fhp));
2797 	if (error)
2798 		return(error);
2799 	/* find the mount point */
2800 	mp = vfs_getvfs(&fhp.fh_fsid);
2801 	if (mp == NULL)
2802 		return (ESTALE);
2803 	/* now give me my vnode, it gets returned to me locked */
2804 	error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
2805 	if (error)
2806 		return (error);
2807 	/*
2808 	 * From now on we have to make sure not to forget about the
2809 	 * vnode.
2810 	 * Any error that causes an abort must vput(vp); just set
2811 	 * error = err and 'goto bad;'.
2812 	 */
2813 
2814 	/*
2815 	 * from vn_open
2816 	 */
2817 	if (vp->v_type == VLNK) {
2818 		error = EMLINK;
2819 		goto bad;
2820 	}
2821 	if (vp->v_type == VSOCK) {
2822 		error = EOPNOTSUPP;
2823 		goto bad;
2824 	}
2825 	mode = 0;
2826 	if (fmode & (FWRITE | O_TRUNC)) {
2827 		if (vp->v_type == VDIR) {
2828 			error = EISDIR;
2829 			goto bad;
2830 		}
2831 		error = vn_writechk(vp);
2832 		if (error)
2833 			goto bad;
2834 		mode |= VWRITE;
2835 	}
2836 	if (fmode & FREAD)
2837 		mode |= VREAD;
2838 	if (mode) {
2839 		error = VOP_ACCESS(vp, mode, p->p_ucred, td);
2840 		if (error)
2841 			goto bad;
2842 	}
2843 	if (fmode & O_TRUNC) {
2844 		VOP_UNLOCK(vp, NULL, 0, td);			/* XXX */
2845 		VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2846 		vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);	/* XXX */
2847 		VATTR_NULL(vap);
2848 		vap->va_size = 0;
2849 		error = VOP_SETATTR(vp, vap, p->p_ucred, td);
2850 		if (error)
2851 			goto bad;
2852 	}
2853 	error = VOP_OPEN(vp, fmode, p->p_ucred, td);
2854 	if (error)
2855 		goto bad;
2856 	/*
2857 	 * Make sure that a VM object is created for VMIO support.
2858 	 */
2859 	if (vn_canvmio(vp) == TRUE) {
2860 		if ((error = vfs_object_create(vp, td)) != 0)
2861 			goto bad;
2862 	}
2863 	if (fmode & FWRITE)
2864 		vp->v_writecount++;
2865 
2866 	/*
2867 	 * end of vn_open code
2868 	 */
2869 
2870 	if ((error = falloc(p, &nfp, &indx)) != 0) {
2871 		if (fmode & FWRITE)
2872 			vp->v_writecount--;
2873 		goto bad;
2874 	}
2875 	fp = nfp;
2876 
2877 	/*
2878 	 * hold an extra reference to avoid having fp ripped out
2879 	 * from under us while we block in the lock op.
2880 	 */
2881 	fhold(fp);
2882 	nfp->f_data = (caddr_t)vp;
2883 	nfp->f_flag = fmode & FMASK;
2884 	nfp->f_ops = &vnops;
2885 	nfp->f_type = DTYPE_VNODE;
2886 	if (fmode & (O_EXLOCK | O_SHLOCK)) {
2887 		lf.l_whence = SEEK_SET;
2888 		lf.l_start = 0;
2889 		lf.l_len = 0;
2890 		if (fmode & O_EXLOCK)
2891 			lf.l_type = F_WRLCK;
2892 		else
2893 			lf.l_type = F_RDLCK;
2894 		type = F_FLOCK;
2895 		if ((fmode & FNONBLOCK) == 0)
2896 			type |= F_WAIT;
2897 		VOP_UNLOCK(vp, NULL, 0, td);
2898 		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
2899 			/*
2900 			 * lock request failed.  Normally close the descriptor
2901 			 * but handle the case where someone might have dup()d
2902 			 * or close()d it when we weren't looking.
2903 			 */
2904 			if (fdp->fd_ofiles[indx] == fp) {
2905 				fdp->fd_ofiles[indx] = NULL;
2906 				fdrop(fp, td);
2907 			}
2908 
2909 			/*
2910 			 * release our private reference.
2911 			 */
2912 			fdrop(fp, td);
2913 			return (error);
2914 		}
2915 		vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2916 		fp->f_flag |= FHASLOCK;
2917 	}
2918 	if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
2919 		vfs_object_create(vp, td);
2920 
2921 	VOP_UNLOCK(vp, NULL, 0, td);
2922 	fdrop(fp, td);
2923 	uap->sysmsg_result = indx;
2924 	return (0);
2925 
2926 bad:
2927 	vput(vp);
2928 	return (error);
2929 }
2930 
2931 /*
2932  * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
2933  */
2934 int
2935 fhstat(struct fhstat_args *uap)
2936 {
2937 	struct thread *td = curthread;
2938 	struct stat sb;
2939 	fhandle_t fh;
2940 	struct mount *mp;
2941 	struct vnode *vp;
2942 	int error;
2943 
2944 	/*
2945 	 * Must be super user
2946 	 */
2947 	error = suser(td);
2948 	if (error)
2949 		return (error);
2950 
2951 	error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
2952 	if (error)
2953 		return (error);
2954 
2955 	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
2956 		return (ESTALE);
2957 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
2958 		return (error);
2959 	error = vn_stat(vp, &sb, td);
2960 	vput(vp);
2961 	if (error)
2962 		return (error);
2963 	error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
2964 	return (error);
2965 }
2966 
2967 /*
2968  * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
2969  */
2970 int
2971 fhstatfs(struct fhstatfs_args *uap)
2972 {
2973 	struct thread *td = curthread;
2974 	struct statfs *sp;
2975 	struct mount *mp;
2976 	struct vnode *vp;
2977 	struct statfs sb;
2978 	fhandle_t fh;
2979 	int error;
2980 
2981 	/*
2982 	 * Must be super user
2983 	 */
2984 	if ((error = suser(td)))
2985 		return (error);
2986 
2987 	if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
2988 		return (error);
2989 
2990 	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
2991 		return (ESTALE);
2992 	if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
2993 		return (error);
2994 	mp = vp->v_mount;
2995 	sp = &mp->mnt_stat;
2996 	vput(vp);
2997 	if ((error = VFS_STATFS(mp, sp, td)) != 0)
2998 		return (error);
2999 	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3000 	if (suser(td)) {
3001 		bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
3002 		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3003 		sp = &sb;
3004 	}
3005 	return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
3006 }
3007 
3008 /*
3009  * Syscall to push extended attribute configuration information into the
3010  * VFS.  Accepts a path, which it converts to a mountpoint, as well as
3011  * a command (int cmd), and attribute name and misc data.  For now, the
3012  * attribute name is left in userspace for consumption by the VFS_op.
3013  * It will probably be changed so that the syscall copies it into
3014  * kernel space in the future, once the various consumers of the
3015  * attribute code have raised their hands.
3016  *
3017  * Currently this is used only by UFS Extended Attributes.
3018  */
3019 int
3020 extattrctl(struct extattrctl_args *uap)
3021 {
3022 	struct thread *td = curthread;
3023 	struct nameidata nd;
3024 	struct mount *mp;
3025 	int error;
3026 
3027 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
3028 	if ((error = namei(&nd)) != 0)
3029 		return (error);
3030 	mp = nd.ni_vp->v_mount;
3031 	NDFREE(&nd, 0);
3032 	return (VFS_EXTATTRCTL(mp, SCARG(uap, cmd), SCARG(uap, attrname),
3033 	    SCARG(uap, arg), td));
3034 }
3035 
3036 /*
3037  * Syscall to set a named extended attribute on a file or directory.
3038  * Accepts attribute name, and a uio structure pointing to the data to set.
3039  * The uio is consumed in the style of writev().  The real work happens
3040  * in VOP_SETEXTATTR().
3041  */
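/*
 * Illustrative userland sketch, not part of the kernel build, assuming the
 * iovec-based extattr_set_file(2) interface implemented here (path,
 * attribute name, iovec array, iovec count), with the data gathered the
 * same way writev(2) would gather it:
 *
 *	struct iovec iov;
 *
 *	iov.iov_base = "some value";
 *	iov.iov_len = strlen("some value");
 *	extattr_set_file("/tmp/f", "myattr", &iov, 1);
 */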
3042 int
3043 extattr_set_file(struct extattr_set_file_args *uap)
3044 {
3045 	struct thread *td = curthread;
3046 	struct proc *p = td->td_proc;
3047 	struct nameidata nd;
3048 	struct uio auio;
3049 	struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
3050 	char attrname[EXTATTR_MAXNAMELEN];
3051 	u_int iovlen, cnt;
3052 	int error, i;
3053 
3054 	error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3055 	if (error)
3056 		return (error);
3057 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3058 	    SCARG(uap, path), td);
3059 	if ((error = namei(&nd)) != 0)
3060 		return(error);
3061 	iovlen = uap->iovcnt * sizeof(struct iovec);
3062 	if (uap->iovcnt > UIO_SMALLIOV) {
3063 		if (uap->iovcnt > UIO_MAXIOV) {
3064 			error = EINVAL;
3065 			goto done;
3066 		}
3067 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3068 		needfree = iov;
3069 	} else
3070 		iov = aiov;
3071 	auio.uio_iov = iov;
3072 	auio.uio_iovcnt = uap->iovcnt;
3073 	auio.uio_rw = UIO_WRITE;
3074 	auio.uio_segflg = UIO_USERSPACE;
3075 	auio.uio_td = td;
3076 	auio.uio_offset = 0;
3077 	if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3078 		goto done;
3079 	auio.uio_resid = 0;
3080 	for (i = 0; i < uap->iovcnt; i++) {
3081 		if (iov->iov_len > INT_MAX - auio.uio_resid) {
3082 			error = EINVAL;
3083 			goto done;
3084 		}
3085 		auio.uio_resid += iov->iov_len;
3086 		iov++;
3087 	}
3088 	cnt = auio.uio_resid;
3089 	error = VOP_SETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3090 	cnt -= auio.uio_resid;
3091 	uap->sysmsg_result = cnt;
3092 done:
3093 	if (needfree)
3094 		FREE(needfree, M_IOV);
3095 	NDFREE(&nd, 0);
3096 	return (error);
3097 }
3098 
3099 /*
3100  * Syscall to get a named extended attribute on a file or directory.
3101  * Accepts attribute name, and a uio structure pointing to a buffer for the
3102  * data.  The uio is consumed in the style of readv().  The real work
3103  * happens in VOP_GETEXTATTR().
3104  */
3105 int
3106 extattr_get_file(struct extattr_get_file_args *uap)
3107 {
3108 	struct thread *td = curthread;
3109 	struct proc *p = td->td_proc;
3110 	struct nameidata nd;
3111 	struct uio auio;
3112 	struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
3113 	char attrname[EXTATTR_MAXNAMELEN];
3114 	u_int iovlen, cnt;
3115 	int error, i;
3116 
3117 	error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3118 	if (error)
3119 		return (error);
3120 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3121 	    SCARG(uap, path), td);
3122 	if ((error = namei(&nd)) != 0)
3123 		return (error);
3124 	iovlen = uap->iovcnt * sizeof (struct iovec);
3125 	if (uap->iovcnt > UIO_SMALLIOV) {
3126 		if (uap->iovcnt > UIO_MAXIOV) {
3127 			NDFREE(&nd, 0);
3128 			return (EINVAL);
3129 		}
3130 		MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3131 		needfree = iov;
3132 	} else {
3133 		iov = aiov;
3134 		needfree = NULL;
3135 	}
3136 	auio.uio_iov = iov;
3137 	auio.uio_iovcnt = uap->iovcnt;
3138 	auio.uio_rw = UIO_READ;
3139 	auio.uio_segflg = UIO_USERSPACE;
3140 	auio.uio_td = td;
3141 	auio.uio_offset = 0;
3142 	if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3143 		goto done;
3144 	auio.uio_resid = 0;
3145 	for (i = 0; i < uap->iovcnt; i++) {
3146 		if (iov->iov_len > INT_MAX - auio.uio_resid) {
3147 			error = EINVAL;
3148 			goto done;
3149 		}
3150 		auio.uio_resid += iov->iov_len;
3151 		iov++;
3152 	}
3153 	cnt = auio.uio_resid;
3154 	error = VOP_GETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3155 	cnt -= auio.uio_resid;
3156 	uap->sysmsg_result = cnt;
3157 done:
3158 	if (needfree)
3159 		FREE(needfree, M_IOV);
3160 	NDFREE(&nd, 0);
3161 	return(error);
3162 }
3163 
3164 /*
3165  * Syscall to delete a named extended attribute from a file or directory.
3166  * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
3167  */
3168 int
3169 extattr_delete_file(struct extattr_delete_file_args *uap)
3170 {
3171 	struct thread *td = curthread;
3172 	struct proc *p = td->td_proc;
3173 	struct nameidata nd;
3174 	char attrname[EXTATTR_MAXNAMELEN];
3175 	int	error;
3176 
3177 	error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3178 	if (error)
3179 		return(error);
3180 	NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3181 	    SCARG(uap, path), td);
3182 	if ((error = namei(&nd)) != 0)
3183 		return(error);
3184 	error = VOP_SETEXTATTR(nd.ni_vp, attrname, NULL, p->p_ucred, td);
3185 	NDFREE(&nd, 0);
3186 	return(error);
3187 }
3188