1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $ 40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/buf.h> 45 #include <sys/conf.h> 46 #include <sys/sysent.h> 47 #include <sys/malloc.h> 48 #include <sys/mount.h> 49 #include <sys/mountctl.h> 50 #include <sys/sysproto.h> 51 #include <sys/filedesc.h> 52 #include <sys/kernel.h> 53 #include <sys/fcntl.h> 54 #include <sys/file.h> 55 #include <sys/linker.h> 56 #include <sys/stat.h> 57 #include <sys/unistd.h> 58 #include <sys/vnode.h> 59 #include <sys/proc.h> 60 #include <sys/priv.h> 61 #include <sys/jail.h> 62 #include <sys/namei.h> 63 #include <sys/nlookup.h> 64 #include <sys/dirent.h> 65 #include <sys/extattr.h> 66 #include <sys/spinlock.h> 67 #include <sys/kern_syscall.h> 68 #include <sys/objcache.h> 69 #include <sys/sysctl.h> 70 71 #include <sys/buf2.h> 72 #include <sys/file2.h> 73 #include <sys/spinlock2.h> 74 #include <sys/mplock2.h> 75 76 #include <vm/vm.h> 77 #include <vm/vm_object.h> 78 #include <vm/vm_page.h> 79 80 #include <machine/limits.h> 81 #include <machine/stdarg.h> 82 83 #include <vfs/union/union.h> 84 85 static void mount_warning(struct mount *mp, const char *ctl, ...) 
86 __printflike(2, 3); 87 static int mount_path(struct proc *p, struct mount *mp, char **rb, char **fb); 88 static int checkvp_chdir (struct vnode *vn, struct thread *td); 89 static void checkdirs (struct nchandle *old_nch, struct nchandle *new_nch); 90 static int chroot_refuse_vdir_fds (struct filedesc *fdp); 91 static int chroot_visible_mnt(struct mount *mp, struct proc *p); 92 static int getutimes (const struct timeval *, struct timespec *); 93 static int setfown (struct mount *, struct vnode *, uid_t, gid_t); 94 static int setfmode (struct vnode *, int); 95 static int setfflags (struct vnode *, int); 96 static int setutimes (struct vnode *, struct vattr *, 97 const struct timespec *, int); 98 static int usermount = 0; /* if 1, non-root can mount fs. */ 99 100 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *); 101 102 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, 103 "Allow non-root users to mount filesystems"); 104 105 /* 106 * Virtual File System System Calls 107 */ 108 109 /* 110 * Mount a file system. 111 * 112 * mount_args(char *type, char *path, int flags, caddr_t data) 113 * 114 * MPALMOSTSAFE 115 */ 116 int 117 sys_mount(struct mount_args *uap) 118 { 119 struct thread *td = curthread; 120 struct vnode *vp; 121 struct nchandle nch; 122 struct mount *mp, *nullmp; 123 struct vfsconf *vfsp; 124 int error, flag = 0, flag2 = 0; 125 int hasmount; 126 struct vattr va; 127 struct nlookupdata nd; 128 char fstypename[MFSNAMELEN]; 129 struct ucred *cred; 130 131 get_mplock(); 132 cred = td->td_ucred; 133 if (jailed(cred)) { 134 error = EPERM; 135 goto done; 136 } 137 if (usermount == 0 && (error = priv_check(td, PRIV_ROOT))) 138 goto done; 139 140 /* 141 * Do not allow NFS export by non-root users. 142 */ 143 if (uap->flags & MNT_EXPORTED) { 144 error = priv_check(td, PRIV_ROOT); 145 if (error) 146 goto done; 147 } 148 /* 149 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users 150 */ 151 if (priv_check(td, PRIV_ROOT)) 152 uap->flags |= MNT_NOSUID | MNT_NODEV; 153 154 /* 155 * Lookup the requested path and extract the nch and vnode. 156 */ 157 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 158 if (error == 0) { 159 if ((error = nlookup(&nd)) == 0) { 160 if (nd.nl_nch.ncp->nc_vp == NULL) 161 error = ENOENT; 162 } 163 } 164 if (error) { 165 nlookup_done(&nd); 166 goto done; 167 } 168 169 /* 170 * If the target filesystem is resolved via a nullfs mount, then 171 * nd.nl_nch.mount will be pointing to the nullfs mount structure 172 * instead of the target file system. We need it in case we are 173 * doing an update. 174 */ 175 nullmp = nd.nl_nch.mount; 176 177 /* 178 * Extract the locked+refd ncp and cleanup the nd structure 179 */ 180 nch = nd.nl_nch; 181 cache_zero(&nd.nl_nch); 182 nlookup_done(&nd); 183 184 if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && 185 (mp = cache_findmount(&nch)) != NULL) { 186 cache_dropmount(mp); 187 hasmount = 1; 188 } else { 189 hasmount = 0; 190 } 191 192 193 /* 194 * now we have the locked ref'd nch and unreferenced vnode. 195 */ 196 vp = nch.ncp->nc_vp; 197 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) { 198 cache_put(&nch); 199 goto done; 200 } 201 cache_unlock(&nch); 202 203 /* 204 * Extract the file system type. We need to know this early, to take 205 * appropriate actions if we are dealing with a nullfs. 
206 */ 207 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) { 208 cache_drop(&nch); 209 vput(vp); 210 goto done; 211 } 212 213 /* 214 * Now we have an unlocked ref'd nch and a locked ref'd vp 215 */ 216 if (uap->flags & MNT_UPDATE) { 217 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 218 cache_drop(&nch); 219 vput(vp); 220 error = EINVAL; 221 goto done; 222 } 223 224 if (strncmp(fstypename, "null", 5) == 0) { 225 KKASSERT(nullmp); 226 mp = nullmp; 227 } else { 228 mp = vp->v_mount; 229 } 230 231 flag = mp->mnt_flag; 232 flag2 = mp->mnt_kern_flag; 233 /* 234 * We only allow the filesystem to be reloaded if it 235 * is currently mounted read-only. 236 */ 237 if ((uap->flags & MNT_RELOAD) && 238 ((mp->mnt_flag & MNT_RDONLY) == 0)) { 239 cache_drop(&nch); 240 vput(vp); 241 error = EOPNOTSUPP; /* Needs translation */ 242 goto done; 243 } 244 /* 245 * Only root, or the user that did the original mount is 246 * permitted to update it. 247 */ 248 if (mp->mnt_stat.f_owner != cred->cr_uid && 249 (error = priv_check(td, PRIV_ROOT))) { 250 cache_drop(&nch); 251 vput(vp); 252 goto done; 253 } 254 if (vfs_busy(mp, LK_NOWAIT)) { 255 cache_drop(&nch); 256 vput(vp); 257 error = EBUSY; 258 goto done; 259 } 260 if ((vp->v_flag & VMOUNT) != 0 || hasmount) { 261 cache_drop(&nch); 262 vfs_unbusy(mp); 263 vput(vp); 264 error = EBUSY; 265 goto done; 266 } 267 vsetflags(vp, VMOUNT); 268 mp->mnt_flag |= 269 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE); 270 vn_unlock(vp); 271 goto update; 272 } 273 /* 274 * If the user is not root, ensure that they own the directory 275 * onto which we are attempting to mount. 276 */ 277 if ((error = VOP_GETATTR(vp, &va)) || 278 (va.va_uid != cred->cr_uid && (error = priv_check(td, PRIV_ROOT)))) { 279 cache_drop(&nch); 280 vput(vp); 281 goto done; 282 } 283 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) { 284 cache_drop(&nch); 285 vput(vp); 286 goto done; 287 } 288 if (vp->v_type != VDIR) { 289 cache_drop(&nch); 290 vput(vp); 291 error = ENOTDIR; 292 goto done; 293 } 294 if (vp->v_mount->mnt_kern_flag & MNTK_NOSTKMNT) { 295 cache_drop(&nch); 296 vput(vp); 297 error = EPERM; 298 goto done; 299 } 300 vfsp = vfsconf_find_by_name(fstypename); 301 if (vfsp == NULL) { 302 linker_file_t lf; 303 304 /* Only load modules for root (very important!) */ 305 if ((error = priv_check(td, PRIV_ROOT)) != 0) { 306 cache_drop(&nch); 307 vput(vp); 308 goto done; 309 } 310 error = linker_load_file(fstypename, &lf); 311 if (error || lf == NULL) { 312 cache_drop(&nch); 313 vput(vp); 314 if (lf == NULL) 315 error = ENODEV; 316 goto done; 317 } 318 lf->userrefs++; 319 /* lookup again, see if the VFS was loaded */ 320 vfsp = vfsconf_find_by_name(fstypename); 321 if (vfsp == NULL) { 322 lf->userrefs--; 323 linker_file_unload(lf); 324 cache_drop(&nch); 325 vput(vp); 326 error = ENODEV; 327 goto done; 328 } 329 } 330 if ((vp->v_flag & VMOUNT) != 0 || hasmount) { 331 cache_drop(&nch); 332 vput(vp); 333 error = EBUSY; 334 goto done; 335 } 336 vsetflags(vp, VMOUNT); 337 338 /* 339 * Allocate and initialize the filesystem. 
340 */ 341 mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK); 342 mount_init(mp); 343 vfs_busy(mp, LK_NOWAIT); 344 mp->mnt_op = vfsp->vfc_vfsops; 345 mp->mnt_vfc = vfsp; 346 vfsp->vfc_refcount++; 347 mp->mnt_stat.f_type = vfsp->vfc_typenum; 348 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 349 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 350 mp->mnt_stat.f_owner = cred->cr_uid; 351 vn_unlock(vp); 352 update: 353 /* 354 * Set the mount level flags. 355 */ 356 if (uap->flags & MNT_RDONLY) 357 mp->mnt_flag |= MNT_RDONLY; 358 else if (mp->mnt_flag & MNT_RDONLY) 359 mp->mnt_kern_flag |= MNTK_WANTRDWR; 360 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | 361 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME | 362 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 363 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 364 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | 365 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE | 366 MNT_NOSYMFOLLOW | MNT_IGNORE | MNT_TRIM | 367 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR); 368 /* 369 * Mount the filesystem. 370 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they 371 * get. 372 */ 373 error = VFS_MOUNT(mp, uap->path, uap->data, cred); 374 if (mp->mnt_flag & MNT_UPDATE) { 375 if (mp->mnt_kern_flag & MNTK_WANTRDWR) 376 mp->mnt_flag &= ~MNT_RDONLY; 377 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); 378 mp->mnt_kern_flag &=~ MNTK_WANTRDWR; 379 if (error) { 380 mp->mnt_flag = flag; 381 mp->mnt_kern_flag = flag2; 382 } 383 vfs_unbusy(mp); 384 vclrflags(vp, VMOUNT); 385 vrele(vp); 386 cache_drop(&nch); 387 goto done; 388 } 389 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 390 /* 391 * Put the new filesystem on the mount list after root. The mount 392 * point gets its own mnt_ncmountpt (unless the VFS already set one 393 * up) which represents the root of the mount. The lookup code 394 * detects the mount point going forward and checks the root of 395 * the mount going backwards. 396 * 397 * It is not necessary to invalidate or purge the vnode underneath 398 * because elements under the mount will be given their own glue 399 * namecache record. 400 */ 401 if (!error) { 402 if (mp->mnt_ncmountpt.ncp == NULL) { 403 /* 404 * allocate, then unlock, but leave the ref intact 405 */ 406 cache_allocroot(&mp->mnt_ncmountpt, mp, NULL); 407 cache_unlock(&mp->mnt_ncmountpt); 408 } 409 mp->mnt_ncmounton = nch; /* inherits ref */ 410 nch.ncp->nc_flag |= NCF_ISMOUNTPT; 411 412 /* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */ 413 vclrflags(vp, VMOUNT); 414 mountlist_insert(mp, MNTINS_LAST); 415 vn_unlock(vp); 416 checkdirs(&mp->mnt_ncmounton, &mp->mnt_ncmountpt); 417 error = vfs_allocate_syncvnode(mp); 418 vfs_unbusy(mp); 419 error = VFS_START(mp, 0); 420 vrele(vp); 421 } else { 422 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops); 423 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops); 424 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops); 425 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops); 426 vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops); 427 vclrflags(vp, VMOUNT); 428 mp->mnt_vfc->vfc_refcount--; 429 vfs_unbusy(mp); 430 kfree(mp, M_MOUNT); 431 cache_drop(&nch); 432 vput(vp); 433 } 434 done: 435 rel_mplock(); 436 return (error); 437 } 438 439 /* 440 * Scan all active processes to see if any of them have a current 441 * or root directory onto which the new filesystem has just been 442 * mounted. If so, replace them with the new mount point. 
 *
 * The passed ncp is ref'd and locked (from the mount code) and
 * must be associated with the vnode representing the root of the
 * mount point.
 */
struct checkdirs_info {
	struct nchandle old_nch;
	struct nchandle new_nch;
	struct vnode *old_vp;
	struct vnode *new_vp;
};

static int checkdirs_callback(struct proc *p, void *data);

static void
checkdirs(struct nchandle *old_nch, struct nchandle *new_nch)
{
	struct checkdirs_info info;
	struct vnode *olddp;
	struct vnode *newdp;
	struct mount *mp;

	/*
	 * If the old mount point's vnode has a usecount of 1, it is not
	 * being held as a descriptor anywhere.
	 */
	olddp = old_nch->ncp->nc_vp;
	if (olddp == NULL || olddp->v_sysref.refcnt == 1)
		return;

	/*
	 * Force the root vnode of the new mount point to be resolved
	 * so we can update any matching processes.
	 */
	mp = new_nch->mount;
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	cache_setunresolved(new_nch);
	cache_setvp(new_nch, newdp);

	/*
	 * Special handling of the root node
	 */
	if (rootvnode == olddp) {
		vref(newdp);
		vfs_cache_setroot(newdp, cache_hold(new_nch));
	}

	/*
	 * Pass newdp separately so the callback does not have to access
	 * it via new_nch->ncp->nc_vp.
	 */
	info.old_nch = *old_nch;
	info.new_nch = *new_nch;
	info.new_vp = newdp;
	allproc_scan(checkdirs_callback, &info);
	vput(newdp);
}

/*
 * NOTE: callback is not MP safe because the scanned process's filedesc
 * structure can be ripped out from under us, among other things.
 */
static int
checkdirs_callback(struct proc *p, void *data)
{
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);
		vprele1 = NULL;
		vprele2 = NULL;

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 * foreign process.
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock(&fdp->fd_spin);
		if (fdp->fd_ncdir.mount == info->old_nch.mount &&
		    fdp->fd_ncdir.ncp == info->old_nch.ncp) {
			vprele1 = fdp->fd_cdir;
			vref(info->new_vp);
			fdp->fd_cdir = info->new_vp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->new_nch, &fdp->fd_ncdir);
		}
		if (fdp->fd_nrdir.mount == info->old_nch.mount &&
		    fdp->fd_nrdir.ncp == info->old_nch.ncp) {
			vprele2 = fdp->fd_rdir;
			vref(info->new_vp);
			fdp->fd_rdir = info->new_vp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->new_nch, &fdp->fd_nrdir);
		}
		spin_unlock(&fdp->fd_spin);
		if (ncdrop1.ncp)
			cache_drop(&ncdrop1);
		if (ncdrop2.ncp)
			cache_drop(&ncdrop2);
		if (vprele1)
			vrele(vprele1);
		if (vprele2)
			vrele(vprele2);
	}
	return(0);
}

/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not the special file (as before).
 *
 * umount_args(char *path, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_unmount(struct unmount_args *uap)
{
	struct thread *td = curthread;
	struct proc *p __debugvar = td->td_proc;
	struct mount *mp = NULL;
	struct nlookupdata nd;
	int error;

	KKASSERT(p);
	get_mplock();
	if (td->td_ucred->cr_prison != NULL) {
		error = EPERM;
		goto done;
	}
	if (usermount == 0 && (error = priv_check(td, PRIV_ROOT)))
		goto done;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error)
		goto out;

	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != td->td_ucred->cr_uid) &&
	    (error = priv_check(td, PRIV_ROOT)))
		goto out;

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {
		error = EINVAL;
		goto out;
	}

out:
	nlookup_done(&nd);
	if (error == 0)
		error = dounmount(mp, uap->flags);
done:
	rel_mplock();
	return (error);
}

/*
 * Do the actual file system unmount.
 */
static int
dounmount_interlock(struct mount *mp)
{
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
		return (EBUSY);
	mp->mnt_kern_flag |= MNTK_UNMOUNT;
	return(0);
}

static int
unmount_allproc_cb(struct proc *p, void *arg)
{
	struct mount *mp;

	if (p->p_textnch.ncp == NULL)
		return 0;

	mp = (struct mount *)arg;
	if (p->p_textnch.mount == mp)
		cache_drop(&p->p_textnch);

	return 0;
}

int
dounmount(struct mount *mp, int flags)
{
	struct namecache *ncp;
	struct nchandle nch;
	struct vnode *vp;
	int error;
	int async_flag;
	int lflags;
	int freeok = 1;

	/*
	 * Exclusive access for unmounting purposes
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)
		return (error);

	/*
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
	error = lockmgr(&mp->mnt_lock, lflags);
	if (error) {
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		if (mp->mnt_kern_flag & MNTK_MWAIT)
			wakeup(mp);
		return (error);
	}

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;

	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwards.
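	 *
	 * After the invalidation below the only reference expected on
	 * mnt_ncmountpt.ncp is our own (nc_refs == 1 with an empty
	 * nc_list); anything beyond that means some other consumer
	 * still holds the namecache entry and the unmount can only
	 * proceed if forced.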
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {
			allproc_scan(&unmount_allproc_cb, mp);
		}

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {

			if ((flags & MNT_FORCE) == 0) {
				error = EBUSY;
				mount_warning(mp, "Cannot unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
			} else {
				mount_warning(mp, "Forced unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
				freeok = 0;
			}
		}
	}

	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 */
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);
			error = EBUSY;
		} else {
			mount_warning(mp, "Forced unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);
			freeok = 0;
		}
	}

	/*
	 * Decommission our special mnt_syncer vnode.  This also stops
	 * the vnlru code.  If we are unable to unmount we recommission
	 * the vnode.
	 */
	if (error == 0) {
		if ((vp = mp->mnt_syncer) != NULL) {
			mp->mnt_syncer = NULL;
			vrele(vp);
		}
		if (((mp->mnt_flag & MNT_RDONLY) ||
		     (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
		    (flags & MNT_FORCE)) {
			error = VFS_UNMOUNT(mp, flags);
		}
	}
	if (error) {
		if (mp->mnt_syncer == NULL)
			vfs_allocate_syncvnode(mp);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		lockmgr(&mp->mnt_lock, LK_RELEASE);
		if (mp->mnt_kern_flag & MNTK_MWAIT)
			wakeup(mp);
		return (error);
	}
	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 * have to.
	 */
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);

	if (mp->mnt_ncmountpt.ncp != NULL) {
		nch = mp->mnt_ncmountpt;
		cache_zero(&mp->mnt_ncmountpt);
		cache_clrmountpt(&nch);
		cache_drop(&nch);
	}
	if (mp->mnt_ncmounton.ncp != NULL) {
		nch = mp->mnt_ncmounton;
		cache_zero(&mp->mnt_ncmounton);
		cache_clrmountpt(&nch);
		cache_drop(&nch);
	}

	mp->mnt_vfc->vfc_refcount--;
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
		panic("unmount: dangling vnode");
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT)
		wakeup(mp);
	if (freeok)
		kfree(mp, M_MOUNT);
	return (0);
}

static
void
mount_warning(struct mount *mp, const char *ctl, ...)
819 { 820 char *ptr; 821 char *buf; 822 __va_list va; 823 824 __va_start(va, ctl); 825 if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf, 0) == 0) { 826 kprintf("unmount(%s): ", ptr); 827 kvprintf(ctl, va); 828 kprintf("\n"); 829 kfree(buf, M_TEMP); 830 } else { 831 kprintf("unmount(%p", mp); 832 if (mp->mnt_ncmounton.ncp && mp->mnt_ncmounton.ncp->nc_name) 833 kprintf(",%s", mp->mnt_ncmounton.ncp->nc_name); 834 kprintf("): "); 835 kvprintf(ctl, va); 836 kprintf("\n"); 837 } 838 __va_end(va); 839 } 840 841 /* 842 * Shim cache_fullpath() to handle the case where a process is chrooted into 843 * a subdirectory of a mount. In this case if the root mount matches the 844 * process root directory's mount we have to specify the process's root 845 * directory instead of the mount point, because the mount point might 846 * be above the root directory. 847 */ 848 static 849 int 850 mount_path(struct proc *p, struct mount *mp, char **rb, char **fb) 851 { 852 struct nchandle *nch; 853 854 if (p && p->p_fd->fd_nrdir.mount == mp) 855 nch = &p->p_fd->fd_nrdir; 856 else 857 nch = &mp->mnt_ncmountpt; 858 return(cache_fullpath(p, nch, rb, fb, 0)); 859 } 860 861 /* 862 * Sync each mounted filesystem. 863 */ 864 865 #ifdef DEBUG 866 static int syncprt = 0; 867 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, ""); 868 #endif /* DEBUG */ 869 870 static int sync_callback(struct mount *mp, void *data); 871 872 int 873 sys_sync(struct sync_args *uap) 874 { 875 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD); 876 #ifdef DEBUG 877 /* 878 * print out buffer pool stat information on each sync() call. 879 */ 880 if (syncprt) 881 vfs_bufstats(); 882 #endif /* DEBUG */ 883 return (0); 884 } 885 886 static 887 int 888 sync_callback(struct mount *mp, void *data __unused) 889 { 890 int asyncflag; 891 892 if ((mp->mnt_flag & MNT_RDONLY) == 0) { 893 asyncflag = mp->mnt_flag & MNT_ASYNC; 894 mp->mnt_flag &= ~MNT_ASYNC; 895 vfs_msync(mp, MNT_NOWAIT); 896 VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY); 897 mp->mnt_flag |= asyncflag; 898 } 899 return(0); 900 } 901 902 /* XXX PRISON: could be per prison flag */ 903 static int prison_quotas; 904 #if 0 905 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, ""); 906 #endif 907 908 /* 909 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg) 910 * 911 * Change filesystem quotas. 912 * 913 * MPALMOSTSAFE 914 */ 915 int 916 sys_quotactl(struct quotactl_args *uap) 917 { 918 struct nlookupdata nd; 919 struct thread *td; 920 struct mount *mp; 921 int error; 922 923 get_mplock(); 924 td = curthread; 925 if (td->td_ucred->cr_prison && !prison_quotas) { 926 error = EPERM; 927 goto done; 928 } 929 930 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 931 if (error == 0) 932 error = nlookup(&nd); 933 if (error == 0) { 934 mp = nd.nl_nch.mount; 935 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid, 936 uap->arg, nd.nl_cred); 937 } 938 nlookup_done(&nd); 939 done: 940 rel_mplock(); 941 return (error); 942 } 943 944 /* 945 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen, 946 * void *buf, int buflen) 947 * 948 * This function operates on a mount point and executes the specified 949 * operation using the specified control data, and possibly returns data. 950 * 951 * The actual number of bytes stored in the result buffer is returned, 0 952 * if none, otherwise an error is returned. 
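 *
 * Illustrative userland usage (hypothetical path and buffer size;
 * MOUNTCTL_MOUNTFLAGS is the only op permitted without PRIV_ROOT, see the
 * permission check below, and fd may be -1 when the op does not need a
 * descriptor):
 *
 *	char flagsbuf[256];
 *	int n;
 *
 *	n = mountctl("/mnt", MOUNTCTL_MOUNTFLAGS, -1,
 *		     NULL, 0, flagsbuf, sizeof(flagsbuf));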
953 * 954 * MPALMOSTSAFE 955 */ 956 int 957 sys_mountctl(struct mountctl_args *uap) 958 { 959 struct thread *td = curthread; 960 struct proc *p = td->td_proc; 961 struct file *fp; 962 void *ctl = NULL; 963 void *buf = NULL; 964 char *path = NULL; 965 int error; 966 967 /* 968 * Sanity and permissions checks. We must be root. 969 */ 970 KKASSERT(p); 971 if (td->td_ucred->cr_prison != NULL) 972 return (EPERM); 973 if ((uap->op != MOUNTCTL_MOUNTFLAGS) && 974 (error = priv_check(td, PRIV_ROOT)) != 0) 975 return (error); 976 977 /* 978 * Argument length checks 979 */ 980 if (uap->ctllen < 0 || uap->ctllen > 1024) 981 return (EINVAL); 982 if (uap->buflen < 0 || uap->buflen > 16 * 1024) 983 return (EINVAL); 984 if (uap->path == NULL) 985 return (EINVAL); 986 987 /* 988 * Allocate the necessary buffers and copyin data 989 */ 990 path = objcache_get(namei_oc, M_WAITOK); 991 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 992 if (error) 993 goto done; 994 995 if (uap->ctllen) { 996 ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO); 997 error = copyin(uap->ctl, ctl, uap->ctllen); 998 if (error) 999 goto done; 1000 } 1001 if (uap->buflen) 1002 buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO); 1003 1004 /* 1005 * Validate the descriptor 1006 */ 1007 if (uap->fd >= 0) { 1008 fp = holdfp(p->p_fd, uap->fd, -1); 1009 if (fp == NULL) { 1010 error = EBADF; 1011 goto done; 1012 } 1013 } else { 1014 fp = NULL; 1015 } 1016 1017 /* 1018 * Execute the internal kernel function and clean up. 1019 */ 1020 get_mplock(); 1021 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result); 1022 rel_mplock(); 1023 if (fp) 1024 fdrop(fp); 1025 if (error == 0 && uap->sysmsg_result > 0) 1026 error = copyout(buf, uap->buf, uap->sysmsg_result); 1027 done: 1028 if (path) 1029 objcache_put(namei_oc, path); 1030 if (ctl) 1031 kfree(ctl, M_TEMP); 1032 if (buf) 1033 kfree(buf, M_TEMP); 1034 return (error); 1035 } 1036 1037 /* 1038 * Execute a mount control operation by resolving the path to a mount point 1039 * and calling vop_mountctl(). 1040 * 1041 * Use the mount point from the nch instead of the vnode so nullfs mounts 1042 * can properly spike the VOP. 
1043 */ 1044 int 1045 kern_mountctl(const char *path, int op, struct file *fp, 1046 const void *ctl, int ctllen, 1047 void *buf, int buflen, int *res) 1048 { 1049 struct vnode *vp; 1050 struct mount *mp; 1051 struct nlookupdata nd; 1052 int error; 1053 1054 *res = 0; 1055 vp = NULL; 1056 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW); 1057 if (error == 0) 1058 error = nlookup(&nd); 1059 if (error == 0) 1060 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 1061 mp = nd.nl_nch.mount; 1062 nlookup_done(&nd); 1063 if (error) 1064 return (error); 1065 vn_unlock(vp); 1066 1067 /* 1068 * Must be the root of the filesystem 1069 */ 1070 if ((vp->v_flag & (VROOT|VPFSROOT)) == 0) { 1071 vrele(vp); 1072 return (EINVAL); 1073 } 1074 error = vop_mountctl(mp->mnt_vn_use_ops, vp, op, fp, ctl, ctllen, 1075 buf, buflen, res); 1076 vrele(vp); 1077 return (error); 1078 } 1079 1080 int 1081 kern_statfs(struct nlookupdata *nd, struct statfs *buf) 1082 { 1083 struct thread *td = curthread; 1084 struct proc *p = td->td_proc; 1085 struct mount *mp; 1086 struct statfs *sp; 1087 char *fullpath, *freepath; 1088 int error; 1089 1090 if ((error = nlookup(nd)) != 0) 1091 return (error); 1092 mp = nd->nl_nch.mount; 1093 sp = &mp->mnt_stat; 1094 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0) 1095 return (error); 1096 1097 error = mount_path(p, mp, &fullpath, &freepath); 1098 if (error) 1099 return(error); 1100 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1101 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1102 kfree(freepath, M_TEMP); 1103 1104 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1105 bcopy(sp, buf, sizeof(*buf)); 1106 /* Only root should have access to the fsid's. */ 1107 if (priv_check(td, PRIV_ROOT)) 1108 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0; 1109 return (0); 1110 } 1111 1112 /* 1113 * statfs_args(char *path, struct statfs *buf) 1114 * 1115 * Get filesystem statistics. 1116 */ 1117 int 1118 sys_statfs(struct statfs_args *uap) 1119 { 1120 struct nlookupdata nd; 1121 struct statfs buf; 1122 int error; 1123 1124 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1125 if (error == 0) 1126 error = kern_statfs(&nd, &buf); 1127 nlookup_done(&nd); 1128 if (error == 0) 1129 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1130 return (error); 1131 } 1132 1133 int 1134 kern_fstatfs(int fd, struct statfs *buf) 1135 { 1136 struct thread *td = curthread; 1137 struct proc *p = td->td_proc; 1138 struct file *fp; 1139 struct mount *mp; 1140 struct statfs *sp; 1141 char *fullpath, *freepath; 1142 int error; 1143 1144 KKASSERT(p); 1145 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 1146 return (error); 1147 1148 /* 1149 * Try to use mount info from any overlays rather than the 1150 * mount info for the underlying vnode, otherwise we will 1151 * fail when operating on null-mounted paths inside a chroot. 
	 */
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (mp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_cred == NULL) {
		error = EINVAL;
		goto done;
	}
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
		goto done;

	if ((error = mount_path(p, mp, &fullpath, &freepath)) != 0)
		goto done;
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));

	/* Only root should have access to the fsid's. */
	if (priv_check(td, PRIV_ROOT))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
	error = 0;
done:
	fdrop(fp);
	return (error);
}

/*
 * fstatfs_args(int fd, struct statfs *buf)
 *
 * Get filesystem statistics.
 */
int
sys_fstatfs(struct fstatfs_args *uap)
{
	struct statfs buf;
	int error;

	error = kern_fstatfs(uap->fd, &buf);

	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}

int
kern_statvfs(struct nlookupdata *nd, struct statvfs *buf)
{
	struct mount *mp;
	struct statvfs *sp;
	int error;

	if ((error = nlookup(nd)) != 0)
		return (error);
	mp = nd->nl_nch.mount;
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, nd->nl_cred)) != 0)
		return (error);

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;
	bcopy(sp, buf, sizeof(*buf));
	return (0);
}

/*
 * statvfs_args(char *path, struct statvfs *buf)
 *
 * Get filesystem statistics.
 */
int
sys_statvfs(struct statvfs_args *uap)
{
	struct nlookupdata nd;
	struct statvfs buf;
	int error;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_statvfs(&nd, &buf);
	nlookup_done(&nd);
	if (error == 0)
		error = copyout(&buf, uap->buf, sizeof(*uap->buf));
	return (error);
}

int
kern_fstatvfs(int fd, struct statvfs *buf)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct mount *mp;
	struct statvfs *sp;
	int error;

	KKASSERT(p);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if ((mp = fp->f_nchandle.mount) == NULL)
		mp = ((struct vnode *)fp->f_data)->v_mount;
	if (mp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_cred == NULL) {
		error = EINVAL;
		goto done;
	}
	sp = &mp->mnt_vstat;
	if ((error = VFS_STATVFS(mp, sp, fp->f_cred)) != 0)
		goto done;

	sp->f_flag = 0;
	if (mp->mnt_flag & MNT_RDONLY)
		sp->f_flag |= ST_RDONLY;
	if (mp->mnt_flag & MNT_NOSUID)
		sp->f_flag |= ST_NOSUID;

	bcopy(sp, buf, sizeof(*buf));
	error = 0;
done:
	fdrop(fp);
	return (error);
}

/*
 * fstatvfs_args(int fd, struct statvfs *buf)
 *
 * Get filesystem statistics.
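 *
 * Illustrative userland usage (hypothetical descriptor); any open file on
 * the filesystem of interest will do:
 *
 *	struct statvfs sv;
 *
 *	if (fstatvfs(fd, &sv) == 0 && (sv.f_flag & ST_RDONLY))
 *		printf("read-only filesystem\n");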
1291 */ 1292 int 1293 sys_fstatvfs(struct fstatvfs_args *uap) 1294 { 1295 struct statvfs buf; 1296 int error; 1297 1298 error = kern_fstatvfs(uap->fd, &buf); 1299 1300 if (error == 0) 1301 error = copyout(&buf, uap->buf, sizeof(*uap->buf)); 1302 return (error); 1303 } 1304 1305 /* 1306 * getfsstat_args(struct statfs *buf, long bufsize, int flags) 1307 * 1308 * Get statistics on all filesystems. 1309 */ 1310 1311 struct getfsstat_info { 1312 struct statfs *sfsp; 1313 long count; 1314 long maxcount; 1315 int error; 1316 int flags; 1317 struct thread *td; 1318 }; 1319 1320 static int getfsstat_callback(struct mount *, void *); 1321 1322 int 1323 sys_getfsstat(struct getfsstat_args *uap) 1324 { 1325 struct thread *td = curthread; 1326 struct getfsstat_info info; 1327 1328 bzero(&info, sizeof(info)); 1329 1330 info.maxcount = uap->bufsize / sizeof(struct statfs); 1331 info.sfsp = uap->buf; 1332 info.count = 0; 1333 info.flags = uap->flags; 1334 info.td = td; 1335 1336 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD); 1337 if (info.sfsp && info.count > info.maxcount) 1338 uap->sysmsg_result = info.maxcount; 1339 else 1340 uap->sysmsg_result = info.count; 1341 return (info.error); 1342 } 1343 1344 static int 1345 getfsstat_callback(struct mount *mp, void *data) 1346 { 1347 struct getfsstat_info *info = data; 1348 struct statfs *sp; 1349 char *freepath; 1350 char *fullpath; 1351 int error; 1352 1353 if (info->sfsp && info->count < info->maxcount) { 1354 if (info->td->td_proc && 1355 !chroot_visible_mnt(mp, info->td->td_proc)) { 1356 return(0); 1357 } 1358 sp = &mp->mnt_stat; 1359 1360 /* 1361 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1362 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1363 * overrides MNT_WAIT. 1364 */ 1365 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1366 (info->flags & MNT_WAIT)) && 1367 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1368 return(0); 1369 } 1370 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1371 1372 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1373 if (error) { 1374 info->error = error; 1375 return(-1); 1376 } 1377 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1378 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1379 kfree(freepath, M_TEMP); 1380 1381 error = copyout(sp, info->sfsp, sizeof(*sp)); 1382 if (error) { 1383 info->error = error; 1384 return (-1); 1385 } 1386 ++info->sfsp; 1387 } 1388 info->count++; 1389 return(0); 1390 } 1391 1392 /* 1393 * getvfsstat_args(struct statfs *buf, struct statvfs *vbuf, 1394 long bufsize, int flags) 1395 * 1396 * Get statistics on all filesystems. 
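 *
 * Illustrative two-pass userland usage (hypothetical; a NULL vbuf makes the
 * call return only the number of mounted filesystems, which can then be
 * used to size both arrays):
 *
 *	int n = getvfsstat(NULL, NULL, 0, MNT_WAIT);
 *	struct statfs *fs = calloc(n, sizeof(*fs));
 *	struct statvfs *vfs = calloc(n, sizeof(*vfs));
 *
 *	n = getvfsstat(fs, vfs, n * sizeof(*vfs), MNT_WAIT);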
1397 */ 1398 1399 struct getvfsstat_info { 1400 struct statfs *sfsp; 1401 struct statvfs *vsfsp; 1402 long count; 1403 long maxcount; 1404 int error; 1405 int flags; 1406 struct thread *td; 1407 }; 1408 1409 static int getvfsstat_callback(struct mount *, void *); 1410 1411 int 1412 sys_getvfsstat(struct getvfsstat_args *uap) 1413 { 1414 struct thread *td = curthread; 1415 struct getvfsstat_info info; 1416 1417 bzero(&info, sizeof(info)); 1418 1419 info.maxcount = uap->vbufsize / sizeof(struct statvfs); 1420 info.sfsp = uap->buf; 1421 info.vsfsp = uap->vbuf; 1422 info.count = 0; 1423 info.flags = uap->flags; 1424 info.td = td; 1425 1426 mountlist_scan(getvfsstat_callback, &info, MNTSCAN_FORWARD); 1427 if (info.vsfsp && info.count > info.maxcount) 1428 uap->sysmsg_result = info.maxcount; 1429 else 1430 uap->sysmsg_result = info.count; 1431 return (info.error); 1432 } 1433 1434 static int 1435 getvfsstat_callback(struct mount *mp, void *data) 1436 { 1437 struct getvfsstat_info *info = data; 1438 struct statfs *sp; 1439 struct statvfs *vsp; 1440 char *freepath; 1441 char *fullpath; 1442 int error; 1443 1444 if (info->vsfsp && info->count < info->maxcount) { 1445 if (info->td->td_proc && 1446 !chroot_visible_mnt(mp, info->td->td_proc)) { 1447 return(0); 1448 } 1449 sp = &mp->mnt_stat; 1450 vsp = &mp->mnt_vstat; 1451 1452 /* 1453 * If MNT_NOWAIT or MNT_LAZY is specified, do not 1454 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY 1455 * overrides MNT_WAIT. 1456 */ 1457 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1458 (info->flags & MNT_WAIT)) && 1459 (error = VFS_STATFS(mp, sp, info->td->td_ucred))) { 1460 return(0); 1461 } 1462 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 1463 1464 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 || 1465 (info->flags & MNT_WAIT)) && 1466 (error = VFS_STATVFS(mp, vsp, info->td->td_ucred))) { 1467 return(0); 1468 } 1469 vsp->f_flag = 0; 1470 if (mp->mnt_flag & MNT_RDONLY) 1471 vsp->f_flag |= ST_RDONLY; 1472 if (mp->mnt_flag & MNT_NOSUID) 1473 vsp->f_flag |= ST_NOSUID; 1474 1475 error = mount_path(info->td->td_proc, mp, &fullpath, &freepath); 1476 if (error) { 1477 info->error = error; 1478 return(-1); 1479 } 1480 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 1481 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 1482 kfree(freepath, M_TEMP); 1483 1484 error = copyout(sp, info->sfsp, sizeof(*sp)); 1485 if (error == 0) 1486 error = copyout(vsp, info->vsfsp, sizeof(*vsp)); 1487 if (error) { 1488 info->error = error; 1489 return (-1); 1490 } 1491 ++info->sfsp; 1492 ++info->vsfsp; 1493 } 1494 info->count++; 1495 return(0); 1496 } 1497 1498 1499 /* 1500 * fchdir_args(int fd) 1501 * 1502 * Change current working directory to a given file descriptor. 1503 */ 1504 int 1505 sys_fchdir(struct fchdir_args *uap) 1506 { 1507 struct thread *td = curthread; 1508 struct proc *p = td->td_proc; 1509 struct filedesc *fdp = p->p_fd; 1510 struct vnode *vp, *ovp; 1511 struct mount *mp; 1512 struct file *fp; 1513 struct nchandle nch, onch, tnch; 1514 int error; 1515 1516 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0) 1517 return (error); 1518 lwkt_gettoken(&p->p_token); 1519 vp = (struct vnode *)fp->f_data; 1520 vref(vp); 1521 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1522 if (fp->f_nchandle.ncp == NULL) 1523 error = ENOTDIR; 1524 else 1525 error = checkvp_chdir(vp, td); 1526 if (error) { 1527 vput(vp); 1528 goto done; 1529 } 1530 cache_copy(&fp->f_nchandle, &nch); 1531 1532 /* 1533 * If the ncp has become a mount point, traverse through 1534 * the mount point. 
1535 */ 1536 1537 while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) && 1538 (mp = cache_findmount(&nch)) != NULL 1539 ) { 1540 error = nlookup_mp(mp, &tnch); 1541 if (error == 0) { 1542 cache_unlock(&tnch); /* leave ref intact */ 1543 vput(vp); 1544 vp = tnch.ncp->nc_vp; 1545 error = vget(vp, LK_SHARED); 1546 KKASSERT(error == 0); 1547 cache_drop(&nch); 1548 nch = tnch; 1549 } 1550 cache_dropmount(mp); 1551 } 1552 if (error == 0) { 1553 ovp = fdp->fd_cdir; 1554 onch = fdp->fd_ncdir; 1555 vn_unlock(vp); /* leave ref intact */ 1556 fdp->fd_cdir = vp; 1557 fdp->fd_ncdir = nch; 1558 cache_drop(&onch); 1559 vrele(ovp); 1560 } else { 1561 cache_drop(&nch); 1562 vput(vp); 1563 } 1564 fdrop(fp); 1565 done: 1566 lwkt_reltoken(&p->p_token); 1567 return (error); 1568 } 1569 1570 int 1571 kern_chdir(struct nlookupdata *nd) 1572 { 1573 struct thread *td = curthread; 1574 struct proc *p = td->td_proc; 1575 struct filedesc *fdp = p->p_fd; 1576 struct vnode *vp, *ovp; 1577 struct nchandle onch; 1578 int error; 1579 1580 if ((error = nlookup(nd)) != 0) 1581 return (error); 1582 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 1583 return (ENOENT); 1584 if ((error = vget(vp, LK_SHARED)) != 0) 1585 return (error); 1586 1587 lwkt_gettoken(&p->p_token); 1588 error = checkvp_chdir(vp, td); 1589 vn_unlock(vp); 1590 if (error == 0) { 1591 ovp = fdp->fd_cdir; 1592 onch = fdp->fd_ncdir; 1593 cache_unlock(&nd->nl_nch); /* leave reference intact */ 1594 fdp->fd_ncdir = nd->nl_nch; 1595 fdp->fd_cdir = vp; 1596 cache_drop(&onch); 1597 vrele(ovp); 1598 cache_zero(&nd->nl_nch); 1599 } else { 1600 vrele(vp); 1601 } 1602 lwkt_reltoken(&p->p_token); 1603 return (error); 1604 } 1605 1606 /* 1607 * chdir_args(char *path) 1608 * 1609 * Change current working directory (``.''). 1610 */ 1611 int 1612 sys_chdir(struct chdir_args *uap) 1613 { 1614 struct nlookupdata nd; 1615 int error; 1616 1617 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1618 if (error == 0) 1619 error = kern_chdir(&nd); 1620 nlookup_done(&nd); 1621 return (error); 1622 } 1623 1624 /* 1625 * Helper function for raised chroot(2) security function: Refuse if 1626 * any filedescriptors are open directories. 1627 */ 1628 static int 1629 chroot_refuse_vdir_fds(struct filedesc *fdp) 1630 { 1631 struct vnode *vp; 1632 struct file *fp; 1633 int error; 1634 int fd; 1635 1636 for (fd = 0; fd < fdp->fd_nfiles ; fd++) { 1637 if ((error = holdvnode(fdp, fd, &fp)) != 0) 1638 continue; 1639 vp = (struct vnode *)fp->f_data; 1640 if (vp->v_type != VDIR) { 1641 fdrop(fp); 1642 continue; 1643 } 1644 fdrop(fp); 1645 return(EPERM); 1646 } 1647 return (0); 1648 } 1649 1650 /* 1651 * This sysctl determines if we will allow a process to chroot(2) if it 1652 * has a directory open: 1653 * 0: disallowed for all processes. 1654 * 1: allowed for processes that were not already chroot(2)'ed. 1655 * 2: allowed for all processes. 1656 */ 1657 1658 static int chroot_allow_open_directories = 1; 1659 1660 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW, 1661 &chroot_allow_open_directories, 0, ""); 1662 1663 /* 1664 * chroot to the specified namecache entry. We obtain the vp from the 1665 * namecache data. The passed ncp must be locked and referenced and will 1666 * remain locked and referenced on return. 
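 *
 * The kern.chroot_allow_open_directories sysctl above gates this call;
 * e.g. (illustrative) setting it to 0:
 *
 *	sysctl kern.chroot_allow_open_directories=0
 *
 * makes chroot(2) fail with EPERM for any process that still holds an
 * open directory descriptor.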
1667 */ 1668 int 1669 kern_chroot(struct nchandle *nch) 1670 { 1671 struct thread *td = curthread; 1672 struct proc *p = td->td_proc; 1673 struct filedesc *fdp = p->p_fd; 1674 struct vnode *vp; 1675 int error; 1676 1677 /* 1678 * Only privileged user can chroot 1679 */ 1680 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1681 if (error) 1682 return (error); 1683 1684 /* 1685 * Disallow open directory descriptors (fchdir() breakouts). 1686 */ 1687 if (chroot_allow_open_directories == 0 || 1688 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) { 1689 if ((error = chroot_refuse_vdir_fds(fdp)) != 0) 1690 return (error); 1691 } 1692 if ((vp = nch->ncp->nc_vp) == NULL) 1693 return (ENOENT); 1694 1695 if ((error = vget(vp, LK_SHARED)) != 0) 1696 return (error); 1697 1698 /* 1699 * Check the validity of vp as a directory to change to and 1700 * associate it with rdir/jdir. 1701 */ 1702 error = checkvp_chdir(vp, td); 1703 vn_unlock(vp); /* leave reference intact */ 1704 if (error == 0) { 1705 vrele(fdp->fd_rdir); 1706 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */ 1707 cache_drop(&fdp->fd_nrdir); 1708 cache_copy(nch, &fdp->fd_nrdir); 1709 if (fdp->fd_jdir == NULL) { 1710 fdp->fd_jdir = vp; 1711 vref(fdp->fd_jdir); 1712 cache_copy(nch, &fdp->fd_njdir); 1713 } 1714 } else { 1715 vrele(vp); 1716 } 1717 return (error); 1718 } 1719 1720 /* 1721 * chroot_args(char *path) 1722 * 1723 * Change notion of root (``/'') directory. 1724 */ 1725 int 1726 sys_chroot(struct chroot_args *uap) 1727 { 1728 struct thread *td __debugvar = curthread; 1729 struct nlookupdata nd; 1730 int error; 1731 1732 KKASSERT(td->td_proc); 1733 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1734 if (error == 0) { 1735 nd.nl_flags |= NLC_EXEC; 1736 error = nlookup(&nd); 1737 if (error == 0) 1738 error = kern_chroot(&nd.nl_nch); 1739 } 1740 nlookup_done(&nd); 1741 return(error); 1742 } 1743 1744 int 1745 sys_chroot_kernel(struct chroot_kernel_args *uap) 1746 { 1747 struct thread *td = curthread; 1748 struct nlookupdata nd; 1749 struct nchandle *nch; 1750 struct vnode *vp; 1751 int error; 1752 1753 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 1754 if (error) 1755 goto error_nond; 1756 1757 error = nlookup(&nd); 1758 if (error) 1759 goto error_out; 1760 1761 nch = &nd.nl_nch; 1762 1763 error = priv_check_cred(td->td_ucred, PRIV_VFS_CHROOT, 0); 1764 if (error) 1765 goto error_out; 1766 1767 if ((vp = nch->ncp->nc_vp) == NULL) { 1768 error = ENOENT; 1769 goto error_out; 1770 } 1771 1772 if ((error = cache_vref(nch, nd.nl_cred, &vp)) != 0) 1773 goto error_out; 1774 1775 kprintf("chroot_kernel: set new rootnch/rootvnode to %s\n", uap->path); 1776 get_mplock(); 1777 vfs_cache_setroot(vp, cache_hold(nch)); 1778 rel_mplock(); 1779 1780 error_out: 1781 nlookup_done(&nd); 1782 error_nond: 1783 return(error); 1784 } 1785 1786 /* 1787 * Common routine for chroot and chdir. Given a locked, referenced vnode, 1788 * determine whether it is legal to chdir to the vnode. The vnode's state 1789 * is not changed by this call. 
 */
int
checkvp_chdir(struct vnode *vp, struct thread *td)
{
	int error;

	if (vp->v_type != VDIR)
		error = ENOTDIR;
	else
		error = VOP_EACCESS(vp, VEXEC, td->td_ucred);
	return (error);
}

int
kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct filedesc *fdp = p->p_fd;
	int cmode, flags;
	struct file *nfp;
	struct file *fp;
	struct vnode *vp;
	int type, indx, error = 0;
	struct flock lf;

	if ((oflags & O_ACCMODE) == O_ACCMODE)
		return (EINVAL);
	flags = FFLAGS(oflags);
	error = falloc(lp, &nfp, NULL);
	if (error)
		return (error);
	fp = nfp;
	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;

	/*
	 * XXX p_dupfd is a real mess.  It allows a device to return a
	 * file descriptor to be duplicated rather than doing the open
	 * itself.
	 */
	lp->lwp_dupfd = -1;

	/*
	 * Call vn_open() to do the lookup and assign the vnode to the
	 * file pointer.  vn_open() does not change the ref count on fp
	 * and the vnode, on success, will be inherited by the file pointer
	 * and unlocked.
	 */
	nd->nl_flags |= NLC_LOCKVP;
	error = vn_open(nd, fp, flags, cmode);
	nlookup_done(nd);
	if (error) {
		/*
		 * handle special fdopen() case.  bleh.  dupfdopen() is
		 * responsible for dropping the old contents of ofiles[indx]
		 * if it succeeds.
		 *
		 * Note that fsetfd() will add a ref to fp which represents
		 * the fd_files[] assignment.  We must still drop our
		 * reference.
		 */
		if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
			if (fdalloc(p, 0, &indx) == 0) {
				error = dupfdopen(fdp, indx, lp->lwp_dupfd, flags, error);
				if (error == 0) {
					*res = indx;
					fdrop(fp);	/* our ref */
					return (0);
				}
				fsetfd(fdp, NULL, indx);
			}
		}
		fdrop(fp);	/* our ref */
		if (error == ERESTART)
			error = EINTR;
		return (error);
	}

	/*
	 * ref the vnode for ourselves so it can't be ripped out from under
	 * us.  XXX need an ND flag to request that the vnode be returned
	 * anyway.
	 *
	 * Reserve a file descriptor but do not assign it until the open
	 * succeeds.
	 */
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	if ((error = fdalloc(p, 0, &indx)) != 0) {
		fdrop(fp);
		vrele(vp);
		return (error);
	}

	/*
	 * If no error occurs the vp will have been assigned to the file
	 * pointer.
	 */
	lp->lwp_dupfd = 0;

	if (flags & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (flags & FNONBLOCK)
			type = 0;
		else
			type = F_WAIT;

		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * lock request failed.  Clean up the reserved
			 * descriptor.
			 */
			vrele(vp);
			fsetfd(fdp, NULL, indx);
			fdrop(fp);
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
#if 0
	/*
	 * Assert that all regular file vnodes were created with an object.
1919 */ 1920 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 1921 ("open: regular file has no backing object after vn_open")); 1922 #endif 1923 1924 vrele(vp); 1925 1926 /* 1927 * release our private reference, leaving the one associated with the 1928 * descriptor table intact. 1929 */ 1930 fsetfd(fdp, fp, indx); 1931 fdrop(fp); 1932 *res = indx; 1933 if (oflags & O_CLOEXEC) 1934 error = fsetfdflags(fdp, *res, UF_EXCLOSE); 1935 return (error); 1936 } 1937 1938 /* 1939 * open_args(char *path, int flags, int mode) 1940 * 1941 * Check permissions, allocate an open file structure, 1942 * and call the device open routine if any. 1943 */ 1944 int 1945 sys_open(struct open_args *uap) 1946 { 1947 struct nlookupdata nd; 1948 int error; 1949 1950 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 1951 if (error == 0) { 1952 error = kern_open(&nd, uap->flags, 1953 uap->mode, &uap->sysmsg_result); 1954 } 1955 nlookup_done(&nd); 1956 return (error); 1957 } 1958 1959 /* 1960 * openat_args(int fd, char *path, int flags, int mode) 1961 */ 1962 int 1963 sys_openat(struct openat_args *uap) 1964 { 1965 struct nlookupdata nd; 1966 int error; 1967 struct file *fp; 1968 1969 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 1970 if (error == 0) { 1971 error = kern_open(&nd, uap->flags, uap->mode, 1972 &uap->sysmsg_result); 1973 } 1974 nlookup_done_at(&nd, fp); 1975 return (error); 1976 } 1977 1978 int 1979 kern_mknod(struct nlookupdata *nd, int mode, int rmajor, int rminor) 1980 { 1981 struct thread *td = curthread; 1982 struct proc *p = td->td_proc; 1983 struct vnode *vp; 1984 struct vattr vattr; 1985 int error; 1986 int whiteout = 0; 1987 1988 KKASSERT(p); 1989 1990 VATTR_NULL(&vattr); 1991 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 1992 vattr.va_rmajor = rmajor; 1993 vattr.va_rminor = rminor; 1994 1995 switch (mode & S_IFMT) { 1996 case S_IFMT: /* used by badsect to flag bad sectors */ 1997 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_BAD, 0); 1998 vattr.va_type = VBAD; 1999 break; 2000 case S_IFCHR: 2001 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2002 vattr.va_type = VCHR; 2003 break; 2004 case S_IFBLK: 2005 error = priv_check(td, PRIV_VFS_MKNOD_DEV); 2006 vattr.va_type = VBLK; 2007 break; 2008 case S_IFWHT: 2009 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_WHT, 0); 2010 whiteout = 1; 2011 break; 2012 case S_IFDIR: /* special directories support for HAMMER */ 2013 error = priv_check_cred(td->td_ucred, PRIV_VFS_MKNOD_DIR, 0); 2014 vattr.va_type = VDIR; 2015 break; 2016 default: 2017 error = EINVAL; 2018 break; 2019 } 2020 2021 if (error) 2022 return (error); 2023 2024 bwillinode(1); 2025 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2026 if ((error = nlookup(nd)) != 0) 2027 return (error); 2028 if (nd->nl_nch.ncp->nc_vp) 2029 return (EEXIST); 2030 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2031 return (error); 2032 2033 if (whiteout) { 2034 error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_dvp, 2035 nd->nl_cred, NAMEI_CREATE); 2036 } else { 2037 vp = NULL; 2038 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, 2039 &vp, nd->nl_cred, &vattr); 2040 if (error == 0) 2041 vput(vp); 2042 } 2043 return (error); 2044 } 2045 2046 /* 2047 * mknod_args(char *path, int mode, int dev) 2048 * 2049 * Create a special file. 
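 *
 * Illustrative userland usage (hypothetical path and device numbers; the
 * combined dev argument is split back into major/minor via
 * umajor()/uminor() below):
 *
 *	if (mknod("/dev/mydev", S_IFCHR | 0600, makedev(4, 0)) != 0)
 *		err(1, "mknod");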
2050 */ 2051 int 2052 sys_mknod(struct mknod_args *uap) 2053 { 2054 struct nlookupdata nd; 2055 int error; 2056 2057 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2058 if (error == 0) { 2059 error = kern_mknod(&nd, uap->mode, 2060 umajor(uap->dev), uminor(uap->dev)); 2061 } 2062 nlookup_done(&nd); 2063 return (error); 2064 } 2065 2066 /* 2067 * mknodat_args(int fd, char *path, mode_t mode, dev_t dev) 2068 * 2069 * Create a special file. The path is relative to the directory associated 2070 * with fd. 2071 */ 2072 int 2073 sys_mknodat(struct mknodat_args *uap) 2074 { 2075 struct nlookupdata nd; 2076 struct file *fp; 2077 int error; 2078 2079 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2080 if (error == 0) { 2081 error = kern_mknod(&nd, uap->mode, 2082 umajor(uap->dev), uminor(uap->dev)); 2083 } 2084 nlookup_done_at(&nd, fp); 2085 return (error); 2086 } 2087 2088 int 2089 kern_mkfifo(struct nlookupdata *nd, int mode) 2090 { 2091 struct thread *td = curthread; 2092 struct proc *p = td->td_proc; 2093 struct vattr vattr; 2094 struct vnode *vp; 2095 int error; 2096 2097 bwillinode(1); 2098 2099 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2100 if ((error = nlookup(nd)) != 0) 2101 return (error); 2102 if (nd->nl_nch.ncp->nc_vp) 2103 return (EEXIST); 2104 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2105 return (error); 2106 2107 VATTR_NULL(&vattr); 2108 vattr.va_type = VFIFO; 2109 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask; 2110 vp = NULL; 2111 error = VOP_NMKNOD(&nd->nl_nch, nd->nl_dvp, &vp, nd->nl_cred, &vattr); 2112 if (error == 0) 2113 vput(vp); 2114 return (error); 2115 } 2116 2117 /* 2118 * mkfifo_args(char *path, int mode) 2119 * 2120 * Create a named pipe. 2121 */ 2122 int 2123 sys_mkfifo(struct mkfifo_args *uap) 2124 { 2125 struct nlookupdata nd; 2126 int error; 2127 2128 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2129 if (error == 0) 2130 error = kern_mkfifo(&nd, uap->mode); 2131 nlookup_done(&nd); 2132 return (error); 2133 } 2134 2135 /* 2136 * mkfifoat_args(int fd, char *path, mode_t mode) 2137 * 2138 * Create a named pipe. The path is relative to the directory associated 2139 * with fd. 2140 */ 2141 int 2142 sys_mkfifoat(struct mkfifoat_args *uap) 2143 { 2144 struct nlookupdata nd; 2145 struct file *fp; 2146 int error; 2147 2148 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2149 if (error == 0) 2150 error = kern_mkfifo(&nd, uap->mode); 2151 nlookup_done_at(&nd, fp); 2152 return (error); 2153 } 2154 2155 static int hardlink_check_uid = 0; 2156 SYSCTL_INT(_security, OID_AUTO, hardlink_check_uid, CTLFLAG_RW, 2157 &hardlink_check_uid, 0, 2158 "Unprivileged processes cannot create hard links to files owned by other " 2159 "users"); 2160 static int hardlink_check_gid = 0; 2161 SYSCTL_INT(_security, OID_AUTO, hardlink_check_gid, CTLFLAG_RW, 2162 &hardlink_check_gid, 0, 2163 "Unprivileged processes cannot create hard links to files owned by other " 2164 "groups"); 2165 2166 static int 2167 can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred) 2168 { 2169 struct vattr va; 2170 int error; 2171 2172 /* 2173 * Shortcut if disabled 2174 */ 2175 if (hardlink_check_uid == 0 && hardlink_check_gid == 0) 2176 return (0); 2177 2178 /* 2179 * Privileged user can always hardlink 2180 */ 2181 if (priv_check_cred(cred, PRIV_VFS_LINK, 0) == 0) 2182 return (0); 2183 2184 /* 2185 * Otherwise only if the originating file is owned by the 2186 * same user or group. 
Note that any group is allowed if 2187 * the file is owned by the caller. 2188 */ 2189 error = VOP_GETATTR(vp, &va); 2190 if (error != 0) 2191 return (error); 2192 2193 if (hardlink_check_uid) { 2194 if (cred->cr_uid != va.va_uid) 2195 return (EPERM); 2196 } 2197 2198 if (hardlink_check_gid) { 2199 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred)) 2200 return (EPERM); 2201 } 2202 2203 return (0); 2204 } 2205 2206 int 2207 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd) 2208 { 2209 struct thread *td = curthread; 2210 struct vnode *vp; 2211 int error; 2212 2213 /* 2214 * Lookup the source and obtained a locked vnode. 2215 * 2216 * You may only hardlink a file which you have write permission 2217 * on or which you own. 2218 * 2219 * XXX relookup on vget failure / race ? 2220 */ 2221 bwillinode(1); 2222 nd->nl_flags |= NLC_WRITE | NLC_OWN | NLC_HLINK; 2223 if ((error = nlookup(nd)) != 0) 2224 return (error); 2225 vp = nd->nl_nch.ncp->nc_vp; 2226 KKASSERT(vp != NULL); 2227 if (vp->v_type == VDIR) 2228 return (EPERM); /* POSIX */ 2229 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2230 return (error); 2231 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) 2232 return (error); 2233 2234 /* 2235 * Unlock the source so we can lookup the target without deadlocking 2236 * (XXX vp is locked already, possible other deadlock?). The target 2237 * must not exist. 2238 */ 2239 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED); 2240 nd->nl_flags &= ~NLC_NCPISLOCKED; 2241 cache_unlock(&nd->nl_nch); 2242 vn_unlock(vp); 2243 2244 linknd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2245 if ((error = nlookup(linknd)) != 0) { 2246 vrele(vp); 2247 return (error); 2248 } 2249 if (linknd->nl_nch.ncp->nc_vp) { 2250 vrele(vp); 2251 return (EEXIST); 2252 } 2253 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 2254 vrele(vp); 2255 return (error); 2256 } 2257 2258 /* 2259 * Finally run the new API VOP. 2260 */ 2261 error = can_hardlink(vp, td, td->td_ucred); 2262 if (error == 0) { 2263 error = VOP_NLINK(&linknd->nl_nch, linknd->nl_dvp, 2264 vp, linknd->nl_cred); 2265 } 2266 vput(vp); 2267 return (error); 2268 } 2269 2270 /* 2271 * link_args(char *path, char *link) 2272 * 2273 * Make a hard file link. 2274 */ 2275 int 2276 sys_link(struct link_args *uap) 2277 { 2278 struct nlookupdata nd, linknd; 2279 int error; 2280 2281 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2282 if (error == 0) { 2283 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0); 2284 if (error == 0) 2285 error = kern_link(&nd, &linknd); 2286 nlookup_done(&linknd); 2287 } 2288 nlookup_done(&nd); 2289 return (error); 2290 } 2291 2292 /* 2293 * linkat_args(int fd1, char *path1, int fd2, char *path2, int flags) 2294 * 2295 * Make a hard file link. The path1 argument is relative to the directory 2296 * associated with fd1, and similarly the path2 argument is relative to 2297 * the directory associated with fd2. 2298 */ 2299 int 2300 sys_linkat(struct linkat_args *uap) 2301 { 2302 struct nlookupdata nd, linknd; 2303 struct file *fp1, *fp2; 2304 int error; 2305 2306 error = nlookup_init_at(&nd, &fp1, uap->fd1, uap->path1, UIO_USERSPACE, 2307 (uap->flags & AT_SYMLINK_FOLLOW) ? 
NLC_FOLLOW : 0); 2308 if (error == 0) { 2309 error = nlookup_init_at(&linknd, &fp2, uap->fd2, 2310 uap->path2, UIO_USERSPACE, 0); 2311 if (error == 0) 2312 error = kern_link(&nd, &linknd); 2313 nlookup_done_at(&linknd, fp2); 2314 } 2315 nlookup_done_at(&nd, fp1); 2316 return (error); 2317 } 2318 2319 int 2320 kern_symlink(struct nlookupdata *nd, char *path, int mode) 2321 { 2322 struct vattr vattr; 2323 struct vnode *vp; 2324 struct vnode *dvp; 2325 int error; 2326 2327 bwillinode(1); 2328 nd->nl_flags |= NLC_CREATE | NLC_REFDVP; 2329 if ((error = nlookup(nd)) != 0) 2330 return (error); 2331 if (nd->nl_nch.ncp->nc_vp) 2332 return (EEXIST); 2333 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2334 return (error); 2335 dvp = nd->nl_dvp; 2336 VATTR_NULL(&vattr); 2337 vattr.va_mode = mode; 2338 error = VOP_NSYMLINK(&nd->nl_nch, dvp, &vp, nd->nl_cred, &vattr, path); 2339 if (error == 0) 2340 vput(vp); 2341 return (error); 2342 } 2343 2344 /* 2345 * symlink(char *path, char *link) 2346 * 2347 * Make a symbolic link. 2348 */ 2349 int 2350 sys_symlink(struct symlink_args *uap) 2351 { 2352 struct thread *td = curthread; 2353 struct nlookupdata nd; 2354 char *path; 2355 int error; 2356 int mode; 2357 2358 path = objcache_get(namei_oc, M_WAITOK); 2359 error = copyinstr(uap->path, path, MAXPATHLEN, NULL); 2360 if (error == 0) { 2361 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0); 2362 if (error == 0) { 2363 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2364 error = kern_symlink(&nd, path, mode); 2365 } 2366 nlookup_done(&nd); 2367 } 2368 objcache_put(namei_oc, path); 2369 return (error); 2370 } 2371 2372 /* 2373 * symlinkat_args(char *path1, int fd, char *path2) 2374 * 2375 * Make a symbolic link. The path2 argument is relative to the directory 2376 * associated with fd. 2377 */ 2378 int 2379 sys_symlinkat(struct symlinkat_args *uap) 2380 { 2381 struct thread *td = curthread; 2382 struct nlookupdata nd; 2383 struct file *fp; 2384 char *path1; 2385 int error; 2386 int mode; 2387 2388 path1 = objcache_get(namei_oc, M_WAITOK); 2389 error = copyinstr(uap->path1, path1, MAXPATHLEN, NULL); 2390 if (error == 0) { 2391 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path2, 2392 UIO_USERSPACE, 0); 2393 if (error == 0) { 2394 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask; 2395 error = kern_symlink(&nd, path1, mode); 2396 } 2397 nlookup_done_at(&nd, fp); 2398 } 2399 objcache_put(namei_oc, path1); 2400 return (error); 2401 } 2402 2403 /* 2404 * undelete_args(char *path) 2405 * 2406 * Delete a whiteout from the filesystem. 2407 */ 2408 int 2409 sys_undelete(struct undelete_args *uap) 2410 { 2411 struct nlookupdata nd; 2412 int error; 2413 2414 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2415 bwillinode(1); 2416 nd.nl_flags |= NLC_DELETE | NLC_REFDVP; 2417 if (error == 0) 2418 error = nlookup(&nd); 2419 if (error == 0) 2420 error = ncp_writechk(&nd.nl_nch); 2421 if (error == 0) { 2422 error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_dvp, nd.nl_cred, 2423 NAMEI_DELETE); 2424 } 2425 nlookup_done(&nd); 2426 return (error); 2427 } 2428 2429 int 2430 kern_unlink(struct nlookupdata *nd) 2431 { 2432 int error; 2433 2434 bwillinode(1); 2435 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 2436 if ((error = nlookup(nd)) != 0) 2437 return (error); 2438 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 2439 return (error); 2440 error = VOP_NREMOVE(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 2441 return (error); 2442 } 2443 2444 /* 2445 * unlink_args(char *path) 2446 * 2447 * Delete a name from the filesystem. 
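 *
 * Illustrative userland usage (a sketch only; the path is a placeholder):
 *
 *	unlink("/tmp/scratchfile");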
2448 */ 2449 int 2450 sys_unlink(struct unlink_args *uap) 2451 { 2452 struct nlookupdata nd; 2453 int error; 2454 2455 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2456 if (error == 0) 2457 error = kern_unlink(&nd); 2458 nlookup_done(&nd); 2459 return (error); 2460 } 2461 2462 2463 /* 2464 * unlinkat_args(int fd, char *path, int flags) 2465 * 2466 * Delete the file or directory entry pointed to by fd/path. 2467 */ 2468 int 2469 sys_unlinkat(struct unlinkat_args *uap) 2470 { 2471 struct nlookupdata nd; 2472 struct file *fp; 2473 int error; 2474 2475 if (uap->flags & ~AT_REMOVEDIR) 2476 return (EINVAL); 2477 2478 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2479 if (error == 0) { 2480 if (uap->flags & AT_REMOVEDIR) 2481 error = kern_rmdir(&nd); 2482 else 2483 error = kern_unlink(&nd); 2484 } 2485 nlookup_done_at(&nd, fp); 2486 return (error); 2487 } 2488 2489 int 2490 kern_lseek(int fd, off_t offset, int whence, off_t *res) 2491 { 2492 struct thread *td = curthread; 2493 struct proc *p = td->td_proc; 2494 struct file *fp; 2495 struct vnode *vp; 2496 struct vattr vattr; 2497 off_t new_offset; 2498 int error; 2499 2500 fp = holdfp(p->p_fd, fd, -1); 2501 if (fp == NULL) 2502 return (EBADF); 2503 if (fp->f_type != DTYPE_VNODE) { 2504 error = ESPIPE; 2505 goto done; 2506 } 2507 vp = (struct vnode *)fp->f_data; 2508 2509 switch (whence) { 2510 case L_INCR: 2511 spin_lock(&fp->f_spin); 2512 new_offset = fp->f_offset + offset; 2513 error = 0; 2514 break; 2515 case L_XTND: 2516 error = VOP_GETATTR(vp, &vattr); 2517 spin_lock(&fp->f_spin); 2518 new_offset = offset + vattr.va_size; 2519 break; 2520 case L_SET: 2521 new_offset = offset; 2522 error = 0; 2523 spin_lock(&fp->f_spin); 2524 break; 2525 default: 2526 new_offset = 0; 2527 error = EINVAL; 2528 spin_lock(&fp->f_spin); 2529 break; 2530 } 2531 2532 /* 2533 * Validate the seek position. Negative offsets are not allowed 2534 * for regular files or directories. 2535 * 2536 * Normally we would also not want to allow negative offsets for 2537 * character and block-special devices. However kvm addresses 2538 * on 64 bit architectures might appear to be negative and must 2539 * be allowed. 2540 */ 2541 if (error == 0) { 2542 if (new_offset < 0 && 2543 (vp->v_type == VREG || vp->v_type == VDIR)) { 2544 error = EINVAL; 2545 } else { 2546 fp->f_offset = new_offset; 2547 } 2548 } 2549 *res = fp->f_offset; 2550 spin_unlock(&fp->f_spin); 2551 done: 2552 fdrop(fp); 2553 return (error); 2554 } 2555 2556 /* 2557 * lseek_args(int fd, int pad, off_t offset, int whence) 2558 * 2559 * Reposition read/write file offset. 2560 */ 2561 int 2562 sys_lseek(struct lseek_args *uap) 2563 { 2564 int error; 2565 2566 error = kern_lseek(uap->fd, uap->offset, uap->whence, 2567 &uap->sysmsg_offset); 2568 2569 return (error); 2570 } 2571 2572 /* 2573 * Check if current process can access given file. amode is a bitmask of *_OK 2574 * access bits. flags is a bitmask of AT_* flags. 2575 */ 2576 int 2577 kern_access(struct nlookupdata *nd, int amode, int flags) 2578 { 2579 struct vnode *vp; 2580 int error, mode; 2581 2582 if (flags & ~AT_EACCESS) 2583 return (EINVAL); 2584 if ((error = nlookup(nd)) != 0) 2585 return (error); 2586 retry: 2587 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2588 if (error) 2589 return (error); 2590 2591 /* Flags == 0 means only check for existence. 
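	 * From userland this is the access(path, F_OK) case, since F_OK
	 * is defined as 0.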
*/ 2592 if (amode) { 2593 mode = 0; 2594 if (amode & R_OK) 2595 mode |= VREAD; 2596 if (amode & W_OK) 2597 mode |= VWRITE; 2598 if (amode & X_OK) 2599 mode |= VEXEC; 2600 if ((mode & VWRITE) == 0 || 2601 (error = vn_writechk(vp, &nd->nl_nch)) == 0) 2602 error = VOP_ACCESS_FLAGS(vp, mode, flags, nd->nl_cred); 2603 2604 /* 2605 * If the file handle is stale we have to re-resolve the 2606 * entry. This is a hack at the moment. 2607 */ 2608 if (error == ESTALE) { 2609 vput(vp); 2610 cache_setunresolved(&nd->nl_nch); 2611 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2612 if (error == 0) { 2613 vp = NULL; 2614 goto retry; 2615 } 2616 return(error); 2617 } 2618 } 2619 vput(vp); 2620 return (error); 2621 } 2622 2623 /* 2624 * access_args(char *path, int flags) 2625 * 2626 * Check access permissions. 2627 */ 2628 int 2629 sys_access(struct access_args *uap) 2630 { 2631 struct nlookupdata nd; 2632 int error; 2633 2634 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2635 if (error == 0) 2636 error = kern_access(&nd, uap->flags, 0); 2637 nlookup_done(&nd); 2638 return (error); 2639 } 2640 2641 2642 /* 2643 * faccessat_args(int fd, char *path, int amode, int flags) 2644 * 2645 * Check access permissions. 2646 */ 2647 int 2648 sys_faccessat(struct faccessat_args *uap) 2649 { 2650 struct nlookupdata nd; 2651 struct file *fp; 2652 int error; 2653 2654 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 2655 NLC_FOLLOW); 2656 if (error == 0) 2657 error = kern_access(&nd, uap->amode, uap->flags); 2658 nlookup_done_at(&nd, fp); 2659 return (error); 2660 } 2661 2662 2663 int 2664 kern_stat(struct nlookupdata *nd, struct stat *st) 2665 { 2666 int error; 2667 struct vnode *vp; 2668 2669 if ((error = nlookup(nd)) != 0) 2670 return (error); 2671 again: 2672 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL) 2673 return (ENOENT); 2674 2675 if ((error = vget(vp, LK_SHARED)) != 0) 2676 return (error); 2677 error = vn_stat(vp, st, nd->nl_cred); 2678 2679 /* 2680 * If the file handle is stale we have to re-resolve the entry. This 2681 * is a hack at the moment. 2682 */ 2683 if (error == ESTALE) { 2684 vput(vp); 2685 cache_setunresolved(&nd->nl_nch); 2686 error = cache_resolve(&nd->nl_nch, nd->nl_cred); 2687 if (error == 0) 2688 goto again; 2689 } else { 2690 vput(vp); 2691 } 2692 return (error); 2693 } 2694 2695 /* 2696 * stat_args(char *path, struct stat *ub) 2697 * 2698 * Get file status; this version follows links. 2699 */ 2700 int 2701 sys_stat(struct stat_args *uap) 2702 { 2703 struct nlookupdata nd; 2704 struct stat st; 2705 int error; 2706 2707 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2708 if (error == 0) { 2709 error = kern_stat(&nd, &st); 2710 if (error == 0) 2711 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2712 } 2713 nlookup_done(&nd); 2714 return (error); 2715 } 2716 2717 /* 2718 * lstat_args(char *path, struct stat *ub) 2719 * 2720 * Get file status; this version does not follow links. 2721 */ 2722 int 2723 sys_lstat(struct lstat_args *uap) 2724 { 2725 struct nlookupdata nd; 2726 struct stat st; 2727 int error; 2728 2729 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2730 if (error == 0) { 2731 error = kern_stat(&nd, &st); 2732 if (error == 0) 2733 error = copyout(&st, uap->ub, sizeof(*uap->ub)); 2734 } 2735 nlookup_done(&nd); 2736 return (error); 2737 } 2738 2739 /* 2740 * fstatat_args(int fd, char *path, struct stat *sb, int flags) 2741 * 2742 * Get status of file pointed to by fd/path. 
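 *
 * Illustrative userland usage (a sketch only; dirfd and the entry name
 * are placeholders):
 *
 *	struct stat st;
 *	fstatat(dirfd, "entry", &st, AT_SYMLINK_NOFOLLOW);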
2743 */ 2744 int 2745 sys_fstatat(struct fstatat_args *uap) 2746 { 2747 struct nlookupdata nd; 2748 struct stat st; 2749 int error; 2750 int flags; 2751 struct file *fp; 2752 2753 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 2754 return (EINVAL); 2755 2756 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 2757 2758 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 2759 UIO_USERSPACE, flags); 2760 if (error == 0) { 2761 error = kern_stat(&nd, &st); 2762 if (error == 0) 2763 error = copyout(&st, uap->sb, sizeof(*uap->sb)); 2764 } 2765 nlookup_done_at(&nd, fp); 2766 return (error); 2767 } 2768 2769 /* 2770 * pathconf_Args(char *path, int name) 2771 * 2772 * Get configurable pathname variables. 2773 */ 2774 int 2775 sys_pathconf(struct pathconf_args *uap) 2776 { 2777 struct nlookupdata nd; 2778 struct vnode *vp; 2779 int error; 2780 2781 vp = NULL; 2782 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2783 if (error == 0) 2784 error = nlookup(&nd); 2785 if (error == 0) 2786 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 2787 nlookup_done(&nd); 2788 if (error == 0) { 2789 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg); 2790 vput(vp); 2791 } 2792 return (error); 2793 } 2794 2795 /* 2796 * XXX: daver 2797 * kern_readlink isn't properly split yet. There is a copyin burried 2798 * in VOP_READLINK(). 2799 */ 2800 int 2801 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res) 2802 { 2803 struct thread *td = curthread; 2804 struct vnode *vp; 2805 struct iovec aiov; 2806 struct uio auio; 2807 int error; 2808 2809 if ((error = nlookup(nd)) != 0) 2810 return (error); 2811 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp); 2812 if (error) 2813 return (error); 2814 if (vp->v_type != VLNK) { 2815 error = EINVAL; 2816 } else { 2817 aiov.iov_base = buf; 2818 aiov.iov_len = count; 2819 auio.uio_iov = &aiov; 2820 auio.uio_iovcnt = 1; 2821 auio.uio_offset = 0; 2822 auio.uio_rw = UIO_READ; 2823 auio.uio_segflg = UIO_USERSPACE; 2824 auio.uio_td = td; 2825 auio.uio_resid = count; 2826 error = VOP_READLINK(vp, &auio, td->td_ucred); 2827 } 2828 vput(vp); 2829 *res = count - auio.uio_resid; 2830 return (error); 2831 } 2832 2833 /* 2834 * readlink_args(char *path, char *buf, int count) 2835 * 2836 * Return target name of a symbolic link. 2837 */ 2838 int 2839 sys_readlink(struct readlink_args *uap) 2840 { 2841 struct nlookupdata nd; 2842 int error; 2843 2844 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2845 if (error == 0) { 2846 error = kern_readlink(&nd, uap->buf, uap->count, 2847 &uap->sysmsg_result); 2848 } 2849 nlookup_done(&nd); 2850 return (error); 2851 } 2852 2853 /* 2854 * readlinkat_args(int fd, char *path, char *buf, size_t bufsize) 2855 * 2856 * Return target name of a symbolic link. The path is relative to the 2857 * directory associated with fd. 2858 */ 2859 int 2860 sys_readlinkat(struct readlinkat_args *uap) 2861 { 2862 struct nlookupdata nd; 2863 struct file *fp; 2864 int error; 2865 2866 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 2867 if (error == 0) { 2868 error = kern_readlink(&nd, uap->buf, uap->bufsize, 2869 &uap->sysmsg_result); 2870 } 2871 nlookup_done_at(&nd, fp); 2872 return (error); 2873 } 2874 2875 static int 2876 setfflags(struct vnode *vp, int flags) 2877 { 2878 struct thread *td = curthread; 2879 int error; 2880 struct vattr vattr; 2881 2882 /* 2883 * Prevent non-root users from setting flags on devices. 
When 2884 * a device is reused, users can retain ownership of the device 2885 * if they are allowed to set flags and programs assume that 2886 * chown can't fail when done as root. 2887 */ 2888 if ((vp->v_type == VCHR || vp->v_type == VBLK) && 2889 ((error = priv_check_cred(td->td_ucred, PRIV_VFS_CHFLAGS_DEV, 0)) != 0)) 2890 return (error); 2891 2892 /* 2893 * note: vget is required for any operation that might mod the vnode 2894 * so VINACTIVE is properly cleared. 2895 */ 2896 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2897 VATTR_NULL(&vattr); 2898 vattr.va_flags = flags; 2899 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2900 vput(vp); 2901 } 2902 return (error); 2903 } 2904 2905 /* 2906 * chflags(char *path, int flags) 2907 * 2908 * Change flags of a file given a path name. 2909 */ 2910 int 2911 sys_chflags(struct chflags_args *uap) 2912 { 2913 struct nlookupdata nd; 2914 struct vnode *vp; 2915 int error; 2916 2917 vp = NULL; 2918 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 2919 if (error == 0) 2920 error = nlookup(&nd); 2921 if (error == 0) 2922 error = ncp_writechk(&nd.nl_nch); 2923 if (error == 0) 2924 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 2925 nlookup_done(&nd); 2926 if (error == 0) { 2927 error = setfflags(vp, uap->flags); 2928 vrele(vp); 2929 } 2930 return (error); 2931 } 2932 2933 /* 2934 * lchflags(char *path, int flags) 2935 * 2936 * Change flags of a file given a path name, but don't follow symlinks. 2937 */ 2938 int 2939 sys_lchflags(struct lchflags_args *uap) 2940 { 2941 struct nlookupdata nd; 2942 struct vnode *vp; 2943 int error; 2944 2945 vp = NULL; 2946 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 2947 if (error == 0) 2948 error = nlookup(&nd); 2949 if (error == 0) 2950 error = ncp_writechk(&nd.nl_nch); 2951 if (error == 0) 2952 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 2953 nlookup_done(&nd); 2954 if (error == 0) { 2955 error = setfflags(vp, uap->flags); 2956 vrele(vp); 2957 } 2958 return (error); 2959 } 2960 2961 /* 2962 * fchflags_args(int fd, int flags) 2963 * 2964 * Change flags of a file given a file descriptor. 2965 */ 2966 int 2967 sys_fchflags(struct fchflags_args *uap) 2968 { 2969 struct thread *td = curthread; 2970 struct proc *p = td->td_proc; 2971 struct file *fp; 2972 int error; 2973 2974 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 2975 return (error); 2976 if (fp->f_nchandle.ncp) 2977 error = ncp_writechk(&fp->f_nchandle); 2978 if (error == 0) 2979 error = setfflags((struct vnode *) fp->f_data, uap->flags); 2980 fdrop(fp); 2981 return (error); 2982 } 2983 2984 static int 2985 setfmode(struct vnode *vp, int mode) 2986 { 2987 struct thread *td = curthread; 2988 int error; 2989 struct vattr vattr; 2990 2991 /* 2992 * note: vget is required for any operation that might mod the vnode 2993 * so VINACTIVE is properly cleared. 
2994 */ 2995 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 2996 VATTR_NULL(&vattr); 2997 vattr.va_mode = mode & ALLPERMS; 2998 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 2999 vput(vp); 3000 } 3001 return error; 3002 } 3003 3004 int 3005 kern_chmod(struct nlookupdata *nd, int mode) 3006 { 3007 struct vnode *vp; 3008 int error; 3009 3010 if ((error = nlookup(nd)) != 0) 3011 return (error); 3012 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3013 return (error); 3014 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3015 error = setfmode(vp, mode); 3016 vrele(vp); 3017 return (error); 3018 } 3019 3020 /* 3021 * chmod_args(char *path, int mode) 3022 * 3023 * Change mode of a file given path name. 3024 */ 3025 int 3026 sys_chmod(struct chmod_args *uap) 3027 { 3028 struct nlookupdata nd; 3029 int error; 3030 3031 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3032 if (error == 0) 3033 error = kern_chmod(&nd, uap->mode); 3034 nlookup_done(&nd); 3035 return (error); 3036 } 3037 3038 /* 3039 * lchmod_args(char *path, int mode) 3040 * 3041 * Change mode of a file given path name (don't follow links.) 3042 */ 3043 int 3044 sys_lchmod(struct lchmod_args *uap) 3045 { 3046 struct nlookupdata nd; 3047 int error; 3048 3049 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3050 if (error == 0) 3051 error = kern_chmod(&nd, uap->mode); 3052 nlookup_done(&nd); 3053 return (error); 3054 } 3055 3056 /* 3057 * fchmod_args(int fd, int mode) 3058 * 3059 * Change mode of a file given a file descriptor. 3060 */ 3061 int 3062 sys_fchmod(struct fchmod_args *uap) 3063 { 3064 struct thread *td = curthread; 3065 struct proc *p = td->td_proc; 3066 struct file *fp; 3067 int error; 3068 3069 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3070 return (error); 3071 if (fp->f_nchandle.ncp) 3072 error = ncp_writechk(&fp->f_nchandle); 3073 if (error == 0) 3074 error = setfmode((struct vnode *)fp->f_data, uap->mode); 3075 fdrop(fp); 3076 return (error); 3077 } 3078 3079 /* 3080 * fchmodat_args(char *path, int mode) 3081 * 3082 * Change mode of a file pointed to by fd/path. 3083 */ 3084 int 3085 sys_fchmodat(struct fchmodat_args *uap) 3086 { 3087 struct nlookupdata nd; 3088 struct file *fp; 3089 int error; 3090 int flags; 3091 3092 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3093 return (EINVAL); 3094 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 0 : NLC_FOLLOW; 3095 3096 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3097 UIO_USERSPACE, flags); 3098 if (error == 0) 3099 error = kern_chmod(&nd, uap->mode); 3100 nlookup_done_at(&nd, fp); 3101 return (error); 3102 } 3103 3104 static int 3105 setfown(struct mount *mp, struct vnode *vp, uid_t uid, gid_t gid) 3106 { 3107 struct thread *td = curthread; 3108 int error; 3109 struct vattr vattr; 3110 uid_t o_uid; 3111 gid_t o_gid; 3112 uint64_t size; 3113 3114 /* 3115 * note: vget is required for any operation that might mod the vnode 3116 * so VINACTIVE is properly cleared. 
3117 */ 3118 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) { 3119 if ((error = VOP_GETATTR(vp, &vattr)) != 0) 3120 return error; 3121 o_uid = vattr.va_uid; 3122 o_gid = vattr.va_gid; 3123 size = vattr.va_size; 3124 3125 VATTR_NULL(&vattr); 3126 vattr.va_uid = uid; 3127 vattr.va_gid = gid; 3128 error = VOP_SETATTR(vp, &vattr, td->td_ucred); 3129 vput(vp); 3130 } 3131 3132 if (error == 0) { 3133 if (uid == -1) 3134 uid = o_uid; 3135 if (gid == -1) 3136 gid = o_gid; 3137 VFS_ACCOUNT(mp, o_uid, o_gid, -size); 3138 VFS_ACCOUNT(mp, uid, gid, size); 3139 } 3140 3141 return error; 3142 } 3143 3144 int 3145 kern_chown(struct nlookupdata *nd, int uid, int gid) 3146 { 3147 struct vnode *vp; 3148 int error; 3149 3150 if ((error = nlookup(nd)) != 0) 3151 return (error); 3152 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3153 return (error); 3154 if ((error = ncp_writechk(&nd->nl_nch)) == 0) 3155 error = setfown(nd->nl_nch.mount, vp, uid, gid); 3156 vrele(vp); 3157 return (error); 3158 } 3159 3160 /* 3161 * chown(char *path, int uid, int gid) 3162 * 3163 * Set ownership given a path name. 3164 */ 3165 int 3166 sys_chown(struct chown_args *uap) 3167 { 3168 struct nlookupdata nd; 3169 int error; 3170 3171 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3172 if (error == 0) 3173 error = kern_chown(&nd, uap->uid, uap->gid); 3174 nlookup_done(&nd); 3175 return (error); 3176 } 3177 3178 /* 3179 * lchown_args(char *path, int uid, int gid) 3180 * 3181 * Set ownership given a path name, do not cross symlinks. 3182 */ 3183 int 3184 sys_lchown(struct lchown_args *uap) 3185 { 3186 struct nlookupdata nd; 3187 int error; 3188 3189 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3190 if (error == 0) 3191 error = kern_chown(&nd, uap->uid, uap->gid); 3192 nlookup_done(&nd); 3193 return (error); 3194 } 3195 3196 /* 3197 * fchown_args(int fd, int uid, int gid) 3198 * 3199 * Set ownership given a file descriptor. 3200 */ 3201 int 3202 sys_fchown(struct fchown_args *uap) 3203 { 3204 struct thread *td = curthread; 3205 struct proc *p = td->td_proc; 3206 struct file *fp; 3207 int error; 3208 3209 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3210 return (error); 3211 if (fp->f_nchandle.ncp) 3212 error = ncp_writechk(&fp->f_nchandle); 3213 if (error == 0) 3214 error = setfown(p->p_fd->fd_ncdir.mount, 3215 (struct vnode *)fp->f_data, uap->uid, uap->gid); 3216 fdrop(fp); 3217 return (error); 3218 } 3219 3220 /* 3221 * fchownat(int fd, char *path, int uid, int gid, int flags) 3222 * 3223 * Set ownership of file pointed to by fd/path. 3224 */ 3225 int 3226 sys_fchownat(struct fchownat_args *uap) 3227 { 3228 struct nlookupdata nd; 3229 struct file *fp; 3230 int error; 3231 int flags; 3232 3233 if (uap->flags & ~AT_SYMLINK_NOFOLLOW) 3234 return (EINVAL); 3235 flags = (uap->flags & AT_SYMLINK_NOFOLLOW) ? 
0 : NLC_FOLLOW; 3236 3237 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, 3238 UIO_USERSPACE, flags); 3239 if (error == 0) 3240 error = kern_chown(&nd, uap->uid, uap->gid); 3241 nlookup_done_at(&nd, fp); 3242 return (error); 3243 } 3244 3245 3246 static int 3247 getutimes(const struct timeval *tvp, struct timespec *tsp) 3248 { 3249 struct timeval tv[2]; 3250 3251 if (tvp == NULL) { 3252 microtime(&tv[0]); 3253 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); 3254 tsp[1] = tsp[0]; 3255 } else { 3256 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]); 3257 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]); 3258 } 3259 return 0; 3260 } 3261 3262 static int 3263 setutimes(struct vnode *vp, struct vattr *vattr, 3264 const struct timespec *ts, int nullflag) 3265 { 3266 struct thread *td = curthread; 3267 int error; 3268 3269 VATTR_NULL(vattr); 3270 vattr->va_atime = ts[0]; 3271 vattr->va_mtime = ts[1]; 3272 if (nullflag) 3273 vattr->va_vaflags |= VA_UTIMES_NULL; 3274 error = VOP_SETATTR(vp, vattr, td->td_ucred); 3275 3276 return error; 3277 } 3278 3279 int 3280 kern_utimes(struct nlookupdata *nd, struct timeval *tptr) 3281 { 3282 struct timespec ts[2]; 3283 struct vnode *vp; 3284 struct vattr vattr; 3285 int error; 3286 3287 if ((error = getutimes(tptr, ts)) != 0) 3288 return (error); 3289 3290 /* 3291 * NOTE: utimes() succeeds for the owner even if the file 3292 * is not user-writable. 3293 */ 3294 nd->nl_flags |= NLC_OWN | NLC_WRITE; 3295 3296 if ((error = nlookup(nd)) != 0) 3297 return (error); 3298 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3299 return (error); 3300 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3301 return (error); 3302 3303 /* 3304 * note: vget is required for any operation that might mod the vnode 3305 * so VINACTIVE is properly cleared. 3306 */ 3307 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3308 error = vget(vp, LK_EXCLUSIVE); 3309 if (error == 0) { 3310 error = setutimes(vp, &vattr, ts, (tptr == NULL)); 3311 vput(vp); 3312 } 3313 } 3314 vrele(vp); 3315 return (error); 3316 } 3317 3318 /* 3319 * utimes_args(char *path, struct timeval *tptr) 3320 * 3321 * Set the access and modification times of a file. 3322 */ 3323 int 3324 sys_utimes(struct utimes_args *uap) 3325 { 3326 struct timeval tv[2]; 3327 struct nlookupdata nd; 3328 int error; 3329 3330 if (uap->tptr) { 3331 error = copyin(uap->tptr, tv, sizeof(tv)); 3332 if (error) 3333 return (error); 3334 } 3335 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3336 if (error == 0) 3337 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3338 nlookup_done(&nd); 3339 return (error); 3340 } 3341 3342 /* 3343 * lutimes_args(char *path, struct timeval *tptr) 3344 * 3345 * Set the access and modification times of a file. 3346 */ 3347 int 3348 sys_lutimes(struct lutimes_args *uap) 3349 { 3350 struct timeval tv[2]; 3351 struct nlookupdata nd; 3352 int error; 3353 3354 if (uap->tptr) { 3355 error = copyin(uap->tptr, tv, sizeof(tv)); 3356 if (error) 3357 return (error); 3358 } 3359 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3360 if (error == 0) 3361 error = kern_utimes(&nd, uap->tptr ? tv : NULL); 3362 nlookup_done(&nd); 3363 return (error); 3364 } 3365 3366 /* 3367 * Set utimes on a file descriptor. The creds used to open the 3368 * file are used to determine whether the operation is allowed 3369 * or not. 
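 *
 * Illustrative userland usage (a sketch only): passing a NULL timeval
 * pointer sets both the access and modification times to the current time:
 *
 *	futimes(fd, NULL);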
3370 */ 3371 int 3372 kern_futimes(int fd, struct timeval *tptr) 3373 { 3374 struct thread *td = curthread; 3375 struct proc *p = td->td_proc; 3376 struct timespec ts[2]; 3377 struct file *fp; 3378 struct vnode *vp; 3379 struct vattr vattr; 3380 int error; 3381 3382 error = getutimes(tptr, ts); 3383 if (error) 3384 return (error); 3385 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3386 return (error); 3387 if (fp->f_nchandle.ncp) 3388 error = ncp_writechk(&fp->f_nchandle); 3389 if (error == 0) { 3390 vp = fp->f_data; 3391 error = vget(vp, LK_EXCLUSIVE); 3392 if (error == 0) { 3393 error = VOP_GETATTR(vp, &vattr); 3394 if (error == 0) { 3395 error = naccess_va(&vattr, NLC_OWN | NLC_WRITE, 3396 fp->f_cred); 3397 } 3398 if (error == 0) { 3399 error = setutimes(vp, &vattr, ts, 3400 (tptr == NULL)); 3401 } 3402 vput(vp); 3403 } 3404 } 3405 fdrop(fp); 3406 return (error); 3407 } 3408 3409 /* 3410 * futimes_args(int fd, struct timeval *tptr) 3411 * 3412 * Set the access and modification times of a file. 3413 */ 3414 int 3415 sys_futimes(struct futimes_args *uap) 3416 { 3417 struct timeval tv[2]; 3418 int error; 3419 3420 if (uap->tptr) { 3421 error = copyin(uap->tptr, tv, sizeof(tv)); 3422 if (error) 3423 return (error); 3424 } 3425 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL); 3426 3427 return (error); 3428 } 3429 3430 int 3431 kern_truncate(struct nlookupdata *nd, off_t length) 3432 { 3433 struct vnode *vp; 3434 struct vattr vattr; 3435 int error; 3436 uid_t uid = 0; 3437 gid_t gid = 0; 3438 uint64_t old_size = 0; 3439 3440 if (length < 0) 3441 return(EINVAL); 3442 nd->nl_flags |= NLC_WRITE | NLC_TRUNCATE; 3443 if ((error = nlookup(nd)) != 0) 3444 return (error); 3445 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3446 return (error); 3447 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0) 3448 return (error); 3449 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) { 3450 vrele(vp); 3451 return (error); 3452 } 3453 if (vp->v_type == VDIR) { 3454 error = EISDIR; 3455 goto done; 3456 } 3457 if (vfs_quota_enabled) { 3458 error = VOP_GETATTR(vp, &vattr); 3459 KASSERT(error == 0, ("kern_truncate(): VOP_GETATTR didn't return 0")); 3460 uid = vattr.va_uid; 3461 gid = vattr.va_gid; 3462 old_size = vattr.va_size; 3463 } 3464 3465 if ((error = vn_writechk(vp, &nd->nl_nch)) == 0) { 3466 VATTR_NULL(&vattr); 3467 vattr.va_size = length; 3468 error = VOP_SETATTR(vp, &vattr, nd->nl_cred); 3469 VFS_ACCOUNT(nd->nl_nch.mount, uid, gid, length - old_size); 3470 } 3471 done: 3472 vput(vp); 3473 return (error); 3474 } 3475 3476 /* 3477 * truncate(char *path, int pad, off_t length) 3478 * 3479 * Truncate a file given its path name. 
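 *
 * Illustrative userland usage (a sketch only; the path is a placeholder):
 *
 *	truncate("/tmp/scratchfile", 0);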
3480 */ 3481 int 3482 sys_truncate(struct truncate_args *uap) 3483 { 3484 struct nlookupdata nd; 3485 int error; 3486 3487 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 3488 if (error == 0) 3489 error = kern_truncate(&nd, uap->length); 3490 nlookup_done(&nd); 3491 return error; 3492 } 3493 3494 int 3495 kern_ftruncate(int fd, off_t length) 3496 { 3497 struct thread *td = curthread; 3498 struct proc *p = td->td_proc; 3499 struct vattr vattr; 3500 struct vnode *vp; 3501 struct file *fp; 3502 int error; 3503 uid_t uid = 0; 3504 gid_t gid = 0; 3505 uint64_t old_size = 0; 3506 struct mount *mp; 3507 3508 if (length < 0) 3509 return(EINVAL); 3510 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3511 return (error); 3512 if (fp->f_nchandle.ncp) { 3513 error = ncp_writechk(&fp->f_nchandle); 3514 if (error) 3515 goto done; 3516 } 3517 if ((fp->f_flag & FWRITE) == 0) { 3518 error = EINVAL; 3519 goto done; 3520 } 3521 if (fp->f_flag & FAPPENDONLY) { /* inode was set s/uapnd */ 3522 error = EINVAL; 3523 goto done; 3524 } 3525 vp = (struct vnode *)fp->f_data; 3526 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3527 if (vp->v_type == VDIR) { 3528 error = EISDIR; 3529 goto done; 3530 } 3531 3532 if (vfs_quota_enabled) { 3533 error = VOP_GETATTR(vp, &vattr); 3534 KASSERT(error == 0, ("kern_ftruncate(): VOP_GETATTR didn't return 0")); 3535 uid = vattr.va_uid; 3536 gid = vattr.va_gid; 3537 old_size = vattr.va_size; 3538 } 3539 3540 if ((error = vn_writechk(vp, NULL)) == 0) { 3541 VATTR_NULL(&vattr); 3542 vattr.va_size = length; 3543 error = VOP_SETATTR(vp, &vattr, fp->f_cred); 3544 mp = vq_vptomp(vp); 3545 VFS_ACCOUNT(mp, uid, gid, length - old_size); 3546 } 3547 vn_unlock(vp); 3548 done: 3549 fdrop(fp); 3550 return (error); 3551 } 3552 3553 /* 3554 * ftruncate_args(int fd, int pad, off_t length) 3555 * 3556 * Truncate a file given a file descriptor. 3557 */ 3558 int 3559 sys_ftruncate(struct ftruncate_args *uap) 3560 { 3561 int error; 3562 3563 error = kern_ftruncate(uap->fd, uap->length); 3564 3565 return (error); 3566 } 3567 3568 /* 3569 * fsync(int fd) 3570 * 3571 * Sync an open file. 3572 */ 3573 int 3574 sys_fsync(struct fsync_args *uap) 3575 { 3576 struct thread *td = curthread; 3577 struct proc *p = td->td_proc; 3578 struct vnode *vp; 3579 struct file *fp; 3580 vm_object_t obj; 3581 int error; 3582 3583 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0) 3584 return (error); 3585 vp = (struct vnode *)fp->f_data; 3586 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3587 if ((obj = vp->v_object) != NULL) 3588 vm_object_page_clean(obj, 0, 0, 0); 3589 error = VOP_FSYNC(vp, MNT_WAIT, VOP_FSYNC_SYSCALL); 3590 if (error == 0 && vp->v_mount) 3591 error = buf_fsync(vp); 3592 vn_unlock(vp); 3593 fdrop(fp); 3594 3595 return (error); 3596 } 3597 3598 int 3599 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond) 3600 { 3601 struct nchandle fnchd; 3602 struct nchandle tnchd; 3603 struct namecache *ncp; 3604 struct vnode *fdvp; 3605 struct vnode *tdvp; 3606 struct mount *mp; 3607 int error; 3608 3609 bwillinode(1); 3610 fromnd->nl_flags |= NLC_REFDVP | NLC_RENAME_SRC; 3611 if ((error = nlookup(fromnd)) != 0) 3612 return (error); 3613 if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL) 3614 return (ENOENT); 3615 fnchd.mount = fromnd->nl_nch.mount; 3616 cache_hold(&fnchd); 3617 3618 /* 3619 * unlock the source nch so we can lookup the target nch without 3620 * deadlocking. The target may or may not exist so we do not check 3621 * for a target vp like kern_mkdir() and other creation functions do. 
3622 * 3623 * The source and target directories are ref'd and rechecked after 3624 * everything is relocked to determine if the source or target file 3625 * has been renamed. 3626 */ 3627 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED); 3628 fromnd->nl_flags &= ~NLC_NCPISLOCKED; 3629 cache_unlock(&fromnd->nl_nch); 3630 3631 tond->nl_flags |= NLC_RENAME_DST | NLC_REFDVP; 3632 if ((error = nlookup(tond)) != 0) { 3633 cache_drop(&fnchd); 3634 return (error); 3635 } 3636 if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) { 3637 cache_drop(&fnchd); 3638 return (ENOENT); 3639 } 3640 tnchd.mount = tond->nl_nch.mount; 3641 cache_hold(&tnchd); 3642 3643 /* 3644 * If the source and target are the same there is nothing to do 3645 */ 3646 if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) { 3647 cache_drop(&fnchd); 3648 cache_drop(&tnchd); 3649 return (0); 3650 } 3651 3652 /* 3653 * Mount points cannot be renamed or overwritten 3654 */ 3655 if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) & 3656 NCF_ISMOUNTPT 3657 ) { 3658 cache_drop(&fnchd); 3659 cache_drop(&tnchd); 3660 return (EINVAL); 3661 } 3662 3663 /* 3664 * Relock the source ncp. cache_relock() will deal with any 3665 * deadlocks against the already-locked tond and will also 3666 * make sure both are resolved. 3667 * 3668 * NOTE AFTER RELOCKING: The source or target ncp may have become 3669 * invalid while they were unlocked, nc_vp and nc_mount could 3670 * be NULL. 3671 */ 3672 cache_relock(&fromnd->nl_nch, fromnd->nl_cred, 3673 &tond->nl_nch, tond->nl_cred); 3674 fromnd->nl_flags |= NLC_NCPISLOCKED; 3675 3676 /* 3677 * make sure the parent directories linkages are the same 3678 */ 3679 if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent || 3680 tnchd.ncp != tond->nl_nch.ncp->nc_parent) { 3681 cache_drop(&fnchd); 3682 cache_drop(&tnchd); 3683 return (ENOENT); 3684 } 3685 3686 /* 3687 * Both the source and target must be within the same filesystem and 3688 * in the same filesystem as their parent directories within the 3689 * namecache topology. 3690 * 3691 * NOTE: fromnd's nc_mount or nc_vp could be NULL. 3692 */ 3693 mp = fnchd.mount; 3694 if (mp != tnchd.mount || mp != fromnd->nl_nch.mount || 3695 mp != tond->nl_nch.mount) { 3696 cache_drop(&fnchd); 3697 cache_drop(&tnchd); 3698 return (EXDEV); 3699 } 3700 3701 /* 3702 * Make sure the mount point is writable 3703 */ 3704 if ((error = ncp_writechk(&tond->nl_nch)) != 0) { 3705 cache_drop(&fnchd); 3706 cache_drop(&tnchd); 3707 return (error); 3708 } 3709 3710 /* 3711 * If the target exists and either the source or target is a directory, 3712 * then both must be directories. 3713 * 3714 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might h 3715 * have become NULL. 3716 */ 3717 if (tond->nl_nch.ncp->nc_vp) { 3718 if (fromnd->nl_nch.ncp->nc_vp == NULL) { 3719 error = ENOENT; 3720 } else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) { 3721 if (tond->nl_nch.ncp->nc_vp->v_type != VDIR) 3722 error = ENOTDIR; 3723 } else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) { 3724 error = EISDIR; 3725 } 3726 } 3727 3728 /* 3729 * You cannot rename a source into itself or a subdirectory of itself. 3730 * We check this by travsersing the target directory upwards looking 3731 * for a match against the source. 
3732 * 3733 * XXX MPSAFE 3734 */ 3735 if (error == 0) { 3736 for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) { 3737 if (fromnd->nl_nch.ncp == ncp) { 3738 error = EINVAL; 3739 break; 3740 } 3741 } 3742 } 3743 3744 cache_drop(&fnchd); 3745 cache_drop(&tnchd); 3746 3747 /* 3748 * Even though the namespaces are different, they may still represent 3749 * hardlinks to the same file. The filesystem might have a hard time 3750 * with this so we issue a NREMOVE of the source instead of a NRENAME 3751 * when we detect the situation. 3752 */ 3753 if (error == 0) { 3754 fdvp = fromnd->nl_dvp; 3755 tdvp = tond->nl_dvp; 3756 if (fdvp == NULL || tdvp == NULL) { 3757 error = EPERM; 3758 } else if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) { 3759 error = VOP_NREMOVE(&fromnd->nl_nch, fdvp, 3760 fromnd->nl_cred); 3761 } else { 3762 error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch, 3763 fdvp, tdvp, tond->nl_cred); 3764 } 3765 } 3766 return (error); 3767 } 3768 3769 /* 3770 * rename_args(char *from, char *to) 3771 * 3772 * Rename files. Source and destination must either both be directories, 3773 * or both not be directories. If target is a directory, it must be empty. 3774 */ 3775 int 3776 sys_rename(struct rename_args *uap) 3777 { 3778 struct nlookupdata fromnd, tond; 3779 int error; 3780 3781 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0); 3782 if (error == 0) { 3783 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0); 3784 if (error == 0) 3785 error = kern_rename(&fromnd, &tond); 3786 nlookup_done(&tond); 3787 } 3788 nlookup_done(&fromnd); 3789 return (error); 3790 } 3791 3792 /* 3793 * renameat_args(int oldfd, char *old, int newfd, char *new) 3794 * 3795 * Rename files using paths relative to the directories associated with 3796 * oldfd and newfd. Source and destination must either both be directories, 3797 * or both not be directories. If target is a directory, it must be empty. 3798 */ 3799 int 3800 sys_renameat(struct renameat_args *uap) 3801 { 3802 struct nlookupdata oldnd, newnd; 3803 struct file *oldfp, *newfp; 3804 int error; 3805 3806 error = nlookup_init_at(&oldnd, &oldfp, uap->oldfd, uap->old, 3807 UIO_USERSPACE, 0); 3808 if (error == 0) { 3809 error = nlookup_init_at(&newnd, &newfp, uap->newfd, uap->new, 3810 UIO_USERSPACE, 0); 3811 if (error == 0) 3812 error = kern_rename(&oldnd, &newnd); 3813 nlookup_done_at(&newnd, newfp); 3814 } 3815 nlookup_done_at(&oldnd, oldfp); 3816 return (error); 3817 } 3818 3819 int 3820 kern_mkdir(struct nlookupdata *nd, int mode) 3821 { 3822 struct thread *td = curthread; 3823 struct proc *p = td->td_proc; 3824 struct vnode *vp; 3825 struct vattr vattr; 3826 int error; 3827 3828 bwillinode(1); 3829 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE | NLC_REFDVP; 3830 if ((error = nlookup(nd)) != 0) 3831 return (error); 3832 3833 if (nd->nl_nch.ncp->nc_vp) 3834 return (EEXIST); 3835 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3836 return (error); 3837 VATTR_NULL(&vattr); 3838 vattr.va_type = VDIR; 3839 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; 3840 3841 vp = NULL; 3842 error = VOP_NMKDIR(&nd->nl_nch, nd->nl_dvp, &vp, td->td_ucred, &vattr); 3843 if (error == 0) 3844 vput(vp); 3845 return (error); 3846 } 3847 3848 /* 3849 * mkdir_args(char *path, int mode) 3850 * 3851 * Make a directory file. 
3852 */ 3853 int 3854 sys_mkdir(struct mkdir_args *uap) 3855 { 3856 struct nlookupdata nd; 3857 int error; 3858 3859 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3860 if (error == 0) 3861 error = kern_mkdir(&nd, uap->mode); 3862 nlookup_done(&nd); 3863 return (error); 3864 } 3865 3866 /* 3867 * mkdirat_args(int fd, char *path, mode_t mode) 3868 * 3869 * Make a directory file. The path is relative to the directory associated 3870 * with fd. 3871 */ 3872 int 3873 sys_mkdirat(struct mkdirat_args *uap) 3874 { 3875 struct nlookupdata nd; 3876 struct file *fp; 3877 int error; 3878 3879 error = nlookup_init_at(&nd, &fp, uap->fd, uap->path, UIO_USERSPACE, 0); 3880 if (error == 0) 3881 error = kern_mkdir(&nd, uap->mode); 3882 nlookup_done_at(&nd, fp); 3883 return (error); 3884 } 3885 3886 int 3887 kern_rmdir(struct nlookupdata *nd) 3888 { 3889 int error; 3890 3891 bwillinode(1); 3892 nd->nl_flags |= NLC_DELETE | NLC_REFDVP; 3893 if ((error = nlookup(nd)) != 0) 3894 return (error); 3895 3896 /* 3897 * Do not allow directories representing mount points to be 3898 * deleted, even if empty. Check write perms on mount point 3899 * in case the vnode is aliased (aka nullfs). 3900 */ 3901 if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT)) 3902 return (EINVAL); 3903 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 3904 return (error); 3905 error = VOP_NRMDIR(&nd->nl_nch, nd->nl_dvp, nd->nl_cred); 3906 return (error); 3907 } 3908 3909 /* 3910 * rmdir_args(char *path) 3911 * 3912 * Remove a directory file. 3913 */ 3914 int 3915 sys_rmdir(struct rmdir_args *uap) 3916 { 3917 struct nlookupdata nd; 3918 int error; 3919 3920 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); 3921 if (error == 0) 3922 error = kern_rmdir(&nd); 3923 nlookup_done(&nd); 3924 return (error); 3925 } 3926 3927 int 3928 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res, 3929 enum uio_seg direction) 3930 { 3931 struct thread *td = curthread; 3932 struct proc *p = td->td_proc; 3933 struct vnode *vp; 3934 struct file *fp; 3935 struct uio auio; 3936 struct iovec aiov; 3937 off_t loff; 3938 int error, eofflag; 3939 3940 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0) 3941 return (error); 3942 if ((fp->f_flag & FREAD) == 0) { 3943 error = EBADF; 3944 goto done; 3945 } 3946 vp = (struct vnode *)fp->f_data; 3947 unionread: 3948 if (vp->v_type != VDIR) { 3949 error = EINVAL; 3950 goto done; 3951 } 3952 aiov.iov_base = buf; 3953 aiov.iov_len = count; 3954 auio.uio_iov = &aiov; 3955 auio.uio_iovcnt = 1; 3956 auio.uio_rw = UIO_READ; 3957 auio.uio_segflg = direction; 3958 auio.uio_td = td; 3959 auio.uio_resid = count; 3960 loff = auio.uio_offset = fp->f_offset; 3961 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL); 3962 fp->f_offset = auio.uio_offset; 3963 if (error) 3964 goto done; 3965 if (count == auio.uio_resid) { 3966 if (union_dircheckp) { 3967 error = union_dircheckp(td, &vp, fp); 3968 if (error == -1) 3969 goto unionread; 3970 if (error) 3971 goto done; 3972 } 3973 #if 0 3974 if ((vp->v_flag & VROOT) && 3975 (vp->v_mount->mnt_flag & MNT_UNION)) { 3976 struct vnode *tvp = vp; 3977 vp = vp->v_mount->mnt_vnodecovered; 3978 vref(vp); 3979 fp->f_data = vp; 3980 fp->f_offset = 0; 3981 vrele(tvp); 3982 goto unionread; 3983 } 3984 #endif 3985 } 3986 3987 /* 3988 * WARNING! *basep may not be wide enough to accomodate the 3989 * seek offset. XXX should we hack this to return the upper 32 bits 3990 * for offsets greater then 4G? 
3991 */ 3992 if (basep) { 3993 *basep = (long)loff; 3994 } 3995 *res = count - auio.uio_resid; 3996 done: 3997 fdrop(fp); 3998 return (error); 3999 } 4000 4001 /* 4002 * getdirentries_args(int fd, char *buf, u_int conut, long *basep) 4003 * 4004 * Read a block of directory entries in a file system independent format. 4005 */ 4006 int 4007 sys_getdirentries(struct getdirentries_args *uap) 4008 { 4009 long base; 4010 int error; 4011 4012 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base, 4013 &uap->sysmsg_result, UIO_USERSPACE); 4014 4015 if (error == 0 && uap->basep) 4016 error = copyout(&base, uap->basep, sizeof(*uap->basep)); 4017 return (error); 4018 } 4019 4020 /* 4021 * getdents_args(int fd, char *buf, size_t count) 4022 */ 4023 int 4024 sys_getdents(struct getdents_args *uap) 4025 { 4026 int error; 4027 4028 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL, 4029 &uap->sysmsg_result, UIO_USERSPACE); 4030 4031 return (error); 4032 } 4033 4034 /* 4035 * Set the mode mask for creation of filesystem nodes. 4036 * 4037 * umask(int newmask) 4038 */ 4039 int 4040 sys_umask(struct umask_args *uap) 4041 { 4042 struct thread *td = curthread; 4043 struct proc *p = td->td_proc; 4044 struct filedesc *fdp; 4045 4046 fdp = p->p_fd; 4047 uap->sysmsg_result = fdp->fd_cmask; 4048 fdp->fd_cmask = uap->newmask & ALLPERMS; 4049 return (0); 4050 } 4051 4052 /* 4053 * revoke(char *path) 4054 * 4055 * Void all references to file by ripping underlying filesystem 4056 * away from vnode. 4057 */ 4058 int 4059 sys_revoke(struct revoke_args *uap) 4060 { 4061 struct nlookupdata nd; 4062 struct vattr vattr; 4063 struct vnode *vp; 4064 struct ucred *cred; 4065 int error; 4066 4067 vp = NULL; 4068 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); 4069 if (error == 0) 4070 error = nlookup(&nd); 4071 if (error == 0) 4072 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp); 4073 cred = crhold(nd.nl_cred); 4074 nlookup_done(&nd); 4075 if (error == 0) { 4076 if (error == 0) 4077 error = VOP_GETATTR(vp, &vattr); 4078 if (error == 0 && cred->cr_uid != vattr.va_uid) 4079 error = priv_check_cred(cred, PRIV_VFS_REVOKE, 0); 4080 if (error == 0 && (vp->v_type == VCHR || vp->v_type == VBLK)) { 4081 if (vcount(vp) > 0) 4082 error = vrevoke(vp, cred); 4083 } else if (error == 0) { 4084 error = vrevoke(vp, cred); 4085 } 4086 vrele(vp); 4087 } 4088 if (cred) 4089 crfree(cred); 4090 return (error); 4091 } 4092 4093 /* 4094 * getfh_args(char *fname, fhandle_t *fhp) 4095 * 4096 * Get (NFS) file handle 4097 * 4098 * NOTE: We use the fsid of the covering mount, even if it is a nullfs 4099 * mount. This allows nullfs mounts to be explicitly exported. 4100 * 4101 * WARNING: nullfs mounts of HAMMER PFS ROOTs are safe. 4102 * 4103 * nullfs mounts of subdirectories are not safe. That is, it will 4104 * work, but you do not really have protection against access to 4105 * the related parent directories. 
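 *
 * Illustrative userland usage (a sketch only; requires root privileges,
 * the path is a placeholder):
 *
 *	fhandle_t fh;
 *	getfh("/export/some/file", &fh);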
4106 */ 4107 int 4108 sys_getfh(struct getfh_args *uap) 4109 { 4110 struct thread *td = curthread; 4111 struct nlookupdata nd; 4112 fhandle_t fh; 4113 struct vnode *vp; 4114 struct mount *mp; 4115 int error; 4116 4117 /* 4118 * Must be super user 4119 */ 4120 if ((error = priv_check(td, PRIV_ROOT)) != 0) 4121 return (error); 4122 4123 vp = NULL; 4124 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW); 4125 if (error == 0) 4126 error = nlookup(&nd); 4127 if (error == 0) 4128 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp); 4129 mp = nd.nl_nch.mount; 4130 nlookup_done(&nd); 4131 if (error == 0) { 4132 bzero(&fh, sizeof(fh)); 4133 fh.fh_fsid = mp->mnt_stat.f_fsid; 4134 error = VFS_VPTOFH(vp, &fh.fh_fid); 4135 vput(vp); 4136 if (error == 0) 4137 error = copyout(&fh, uap->fhp, sizeof(fh)); 4138 } 4139 return (error); 4140 } 4141 4142 /* 4143 * fhopen_args(const struct fhandle *u_fhp, int flags) 4144 * 4145 * syscall for the rpc.lockd to use to translate a NFS file handle into 4146 * an open descriptor. 4147 * 4148 * warning: do not remove the priv_check() call or this becomes one giant 4149 * security hole. 4150 */ 4151 int 4152 sys_fhopen(struct fhopen_args *uap) 4153 { 4154 struct thread *td = curthread; 4155 struct filedesc *fdp = td->td_proc->p_fd; 4156 struct mount *mp; 4157 struct vnode *vp; 4158 struct fhandle fhp; 4159 struct vattr vat; 4160 struct vattr *vap = &vat; 4161 struct flock lf; 4162 int fmode, mode, error = 0, type; 4163 struct file *nfp; 4164 struct file *fp; 4165 int indx; 4166 4167 /* 4168 * Must be super user 4169 */ 4170 error = priv_check(td, PRIV_ROOT); 4171 if (error) 4172 return (error); 4173 4174 fmode = FFLAGS(uap->flags); 4175 4176 /* 4177 * Why not allow a non-read/write open for our lockd? 4178 */ 4179 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) 4180 return (EINVAL); 4181 error = copyin(uap->u_fhp, &fhp, sizeof(fhp)); 4182 if (error) 4183 return(error); 4184 4185 /* 4186 * Find the mount point 4187 */ 4188 mp = vfs_getvfs(&fhp.fh_fsid); 4189 if (mp == NULL) { 4190 error = ESTALE; 4191 goto done; 4192 } 4193 /* now give me my vnode, it gets returned to me locked */ 4194 error = VFS_FHTOVP(mp, NULL, &fhp.fh_fid, &vp); 4195 if (error) 4196 goto done; 4197 /* 4198 * from now on we have to make sure not 4199 * to forget about the vnode 4200 * any error that causes an abort must vput(vp) 4201 * just set error = err and 'goto bad;'. 4202 */ 4203 4204 /* 4205 * from vn_open 4206 */ 4207 if (vp->v_type == VLNK) { 4208 error = EMLINK; 4209 goto bad; 4210 } 4211 if (vp->v_type == VSOCK) { 4212 error = EOPNOTSUPP; 4213 goto bad; 4214 } 4215 mode = 0; 4216 if (fmode & (FWRITE | O_TRUNC)) { 4217 if (vp->v_type == VDIR) { 4218 error = EISDIR; 4219 goto bad; 4220 } 4221 error = vn_writechk(vp, NULL); 4222 if (error) 4223 goto bad; 4224 mode |= VWRITE; 4225 } 4226 if (fmode & FREAD) 4227 mode |= VREAD; 4228 if (mode) { 4229 error = VOP_ACCESS(vp, mode, td->td_ucred); 4230 if (error) 4231 goto bad; 4232 } 4233 if (fmode & O_TRUNC) { 4234 vn_unlock(vp); /* XXX */ 4235 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 4236 VATTR_NULL(vap); 4237 vap->va_size = 0; 4238 error = VOP_SETATTR(vp, vap, td->td_ucred); 4239 if (error) 4240 goto bad; 4241 } 4242 4243 /* 4244 * VOP_OPEN needs the file pointer so it can potentially override 4245 * it. 4246 * 4247 * WARNING! no f_nchandle will be associated when fhopen()ing a 4248 * directory. 
XXX 4249 */ 4250 if ((error = falloc(td->td_lwp, &nfp, &indx)) != 0) 4251 goto bad; 4252 fp = nfp; 4253 4254 error = VOP_OPEN(vp, fmode, td->td_ucred, fp); 4255 if (error) { 4256 /* 4257 * setting f_ops this way prevents VOP_CLOSE from being 4258 * called or fdrop() releasing the vp from v_data. Since 4259 * the VOP_OPEN failed we don't want to VOP_CLOSE. 4260 */ 4261 fp->f_ops = &badfileops; 4262 fp->f_data = NULL; 4263 goto bad_drop; 4264 } 4265 4266 /* 4267 * The fp is given its own reference, we still have our ref and lock. 4268 * 4269 * Assert that all regular files must be created with a VM object. 4270 */ 4271 if (vp->v_type == VREG && vp->v_object == NULL) { 4272 kprintf("fhopen: regular file did not have VM object: %p\n", vp); 4273 goto bad_drop; 4274 } 4275 4276 /* 4277 * The open was successful. Handle any locking requirements. 4278 */ 4279 if (fmode & (O_EXLOCK | O_SHLOCK)) { 4280 lf.l_whence = SEEK_SET; 4281 lf.l_start = 0; 4282 lf.l_len = 0; 4283 if (fmode & O_EXLOCK) 4284 lf.l_type = F_WRLCK; 4285 else 4286 lf.l_type = F_RDLCK; 4287 if (fmode & FNONBLOCK) 4288 type = 0; 4289 else 4290 type = F_WAIT; 4291 vn_unlock(vp); 4292 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) { 4293 /* 4294 * release our private reference. 4295 */ 4296 fsetfd(fdp, NULL, indx); 4297 fdrop(fp); 4298 vrele(vp); 4299 goto done; 4300 } 4301 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4302 fp->f_flag |= FHASLOCK; 4303 } 4304 4305 /* 4306 * Clean up. Associate the file pointer with the previously 4307 * reserved descriptor and return it. 4308 */ 4309 vput(vp); 4310 fsetfd(fdp, fp, indx); 4311 fdrop(fp); 4312 uap->sysmsg_result = indx; 4313 if (uap->flags & O_CLOEXEC) 4314 error = fsetfdflags(fdp, indx, UF_EXCLOSE); 4315 return (error); 4316 4317 bad_drop: 4318 fsetfd(fdp, NULL, indx); 4319 fdrop(fp); 4320 bad: 4321 vput(vp); 4322 done: 4323 return (error); 4324 } 4325 4326 /* 4327 * fhstat_args(struct fhandle *u_fhp, struct stat *sb) 4328 */ 4329 int 4330 sys_fhstat(struct fhstat_args *uap) 4331 { 4332 struct thread *td = curthread; 4333 struct stat sb; 4334 fhandle_t fh; 4335 struct mount *mp; 4336 struct vnode *vp; 4337 int error; 4338 4339 /* 4340 * Must be super user 4341 */ 4342 error = priv_check(td, PRIV_ROOT); 4343 if (error) 4344 return (error); 4345 4346 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t)); 4347 if (error) 4348 return (error); 4349 4350 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) 4351 error = ESTALE; 4352 if (error == 0) { 4353 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)) == 0) { 4354 error = vn_stat(vp, &sb, td->td_ucred); 4355 vput(vp); 4356 } 4357 } 4358 if (error == 0) 4359 error = copyout(&sb, uap->sb, sizeof(sb)); 4360 return (error); 4361 } 4362 4363 /* 4364 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf) 4365 */ 4366 int 4367 sys_fhstatfs(struct fhstatfs_args *uap) 4368 { 4369 struct thread *td = curthread; 4370 struct proc *p = td->td_proc; 4371 struct statfs *sp; 4372 struct mount *mp; 4373 struct vnode *vp; 4374 struct statfs sb; 4375 char *fullpath, *freepath; 4376 fhandle_t fh; 4377 int error; 4378 4379 /* 4380 * Must be super user 4381 */ 4382 if ((error = priv_check(td, PRIV_ROOT))) 4383 return (error); 4384 4385 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4386 return (error); 4387 4388 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4389 error = ESTALE; 4390 goto done; 4391 } 4392 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4393 error = ESTALE; 4394 goto done; 4395 } 4396 4397 if ((error = VFS_FHTOVP(mp, NULL, 
&fh.fh_fid, &vp)) != 0) 4398 goto done; 4399 mp = vp->v_mount; 4400 sp = &mp->mnt_stat; 4401 vput(vp); 4402 if ((error = VFS_STATFS(mp, sp, td->td_ucred)) != 0) 4403 goto done; 4404 4405 error = mount_path(p, mp, &fullpath, &freepath); 4406 if (error) 4407 goto done; 4408 bzero(sp->f_mntonname, sizeof(sp->f_mntonname)); 4409 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname)); 4410 kfree(freepath, M_TEMP); 4411 4412 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; 4413 if (priv_check(td, PRIV_ROOT)) { 4414 bcopy(sp, &sb, sizeof(sb)); 4415 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0; 4416 sp = &sb; 4417 } 4418 error = copyout(sp, uap->buf, sizeof(*sp)); 4419 done: 4420 return (error); 4421 } 4422 4423 /* 4424 * fhstatvfs_args(struct fhandle *u_fhp, struct statvfs *buf) 4425 */ 4426 int 4427 sys_fhstatvfs(struct fhstatvfs_args *uap) 4428 { 4429 struct thread *td = curthread; 4430 struct proc *p = td->td_proc; 4431 struct statvfs *sp; 4432 struct mount *mp; 4433 struct vnode *vp; 4434 fhandle_t fh; 4435 int error; 4436 4437 /* 4438 * Must be super user 4439 */ 4440 if ((error = priv_check(td, PRIV_ROOT))) 4441 return (error); 4442 4443 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0) 4444 return (error); 4445 4446 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) { 4447 error = ESTALE; 4448 goto done; 4449 } 4450 if (p != NULL && !chroot_visible_mnt(mp, p)) { 4451 error = ESTALE; 4452 goto done; 4453 } 4454 4455 if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp))) 4456 goto done; 4457 mp = vp->v_mount; 4458 sp = &mp->mnt_vstat; 4459 vput(vp); 4460 if ((error = VFS_STATVFS(mp, sp, td->td_ucred)) != 0) 4461 goto done; 4462 4463 sp->f_flag = 0; 4464 if (mp->mnt_flag & MNT_RDONLY) 4465 sp->f_flag |= ST_RDONLY; 4466 if (mp->mnt_flag & MNT_NOSUID) 4467 sp->f_flag |= ST_NOSUID; 4468 error = copyout(sp, uap->buf, sizeof(*sp)); 4469 done: 4470 return (error); 4471 } 4472 4473 4474 /* 4475 * Syscall to push extended attribute configuration information into the 4476 * VFS. Accepts a path, which it converts to a mountpoint, as well as 4477 * a command (int cmd), and attribute name and misc data. For now, the 4478 * attribute name is left in userspace for consumption by the VFS_op. 4479 * It will probably be changed to be copied into sysspace by the 4480 * syscall in the future, once issues with various consumers of the 4481 * attribute code have raised their hands. 4482 * 4483 * Currently this is used only by UFS Extended Attributes. 
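 *
 * Illustrative userland usage (a sketch only; the command value, path,
 * and attribute name are placeholders and depend on the filesystem):
 *
 *	extattrctl("/mnt", cmd, NULL, EXTATTR_NAMESPACE_USER, "comment");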
/*
 * Syscall to push extended attribute configuration information into the
 * VFS.  Accepts a path, which it converts to a mountpoint, as well as
 * a command (int cmd), and attribute name and misc data.  For now, the
 * attribute name is left in userspace for consumption by the VFS_op.
 * It will probably be changed to be copied into sysspace by the
 * syscall in the future, once the issues raised by the various
 * consumers of the attribute code have been resolved.
 *
 * Currently this is used only by UFS Extended Attributes.
 */
int
sys_extattrctl(struct extattrctl_args *uap)
{
	struct nlookupdata nd;
	struct vnode *vp;
	char attrname[EXTATTR_MAXNAMELEN];
	int error;
	size_t size;

	attrname[0] = 0;
	vp = NULL;
	error = 0;

	if (error == 0 && uap->filename) {
		error = nlookup_init(&nd, uap->filename, UIO_USERSPACE,
				     NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
		nlookup_done(&nd);
	}

	if (error == 0 && uap->attrname) {
		error = copyinstr(uap->attrname, attrname, EXTATTR_MAXNAMELEN,
				  &size);
	}

	if (error == 0) {
		error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = ncp_writechk(&nd.nl_nch);
		if (error == 0) {
			error = VFS_EXTATTRCTL(nd.nl_nch.mount, uap->cmd, vp,
					       uap->attrnamespace,
					       uap->attrname, nd.nl_cred);
		}
		nlookup_done(&nd);
	}

	return (error);
}

/*
 * Syscall to set a named extended attribute on a file or directory.
 */
int
sys_extattr_set_file(struct extattr_set_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	error = VOP_SETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);

	vput(vp);
	nlookup_done(&nd);
	return (error);
}

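/*
 * Illustrative userland sketch (not part of this file): storing a named
 * attribute and reading it back through the syscalls above.  This assumes
 * the usual extattr_set_file(2)/extattr_get_file(2) wrappers and a
 * filesystem with extended attribute support; error handling is
 * abbreviated.
 *
 *	#include <sys/types.h>
 *	#include <sys/extattr.h>
 *	#include <string.h>
 *
 *	char buf[64];
 *
 *	extattr_set_file("/tmp/file", EXTATTR_NAMESPACE_USER, "comment",
 *	    "hello", strlen("hello"));
 *	extattr_get_file("/tmp/file", EXTATTR_NAMESPACE_USER, "comment",
 *	    buf, sizeof(buf));
 */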
/*
 * Syscall to get a named extended attribute on a file or directory.
 */
int
sys_extattr_get_file(struct extattr_get_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct uio auio;
	struct iovec aiov;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	vp = NULL;

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	bzero(&auio, sizeof(auio));
	aiov.iov_base = uap->data;
	aiov.iov_len = uap->nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	error = VOP_GETEXTATTR(vp, uap->attrnamespace, attrname,
			       &auio, nd.nl_cred);
	uap->sysmsg_result = uap->nbytes - auio.uio_resid;

	vput(vp);
	nlookup_done(&nd);
	return (error);
}

/*
 * Syscall to delete a named extended attribute from a file or directory.
 * Accepts attribute name.  The real work happens in VOP_SETEXTATTR().
 */
int
sys_extattr_delete_file(struct extattr_delete_file_args *uap)
{
	char attrname[EXTATTR_MAXNAMELEN];
	struct nlookupdata nd;
	struct vnode *vp;
	int error;

	error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
	if (error)
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = ncp_writechk(&nd.nl_nch);
	if (error == 0) {
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
		if (error == 0) {
			error = VOP_SETEXTATTR(vp, uap->attrnamespace,
					       attrname, NULL, nd.nl_cred);
			vput(vp);
		}
	}
	nlookup_done(&nd);
	return (error);
}

/*
 * Determine if the mount is visible to the process.
 */
static int
chroot_visible_mnt(struct mount *mp, struct proc *p)
{
	struct nchandle nch;

	/*
	 * Traverse from the mount point upwards.  If we hit the process
	 * root then the mount point is visible to the process.
	 */
	nch = mp->mnt_ncmountpt;
	while (nch.ncp) {
		if (nch.mount == p->p_fd->fd_nrdir.mount &&
		    nch.ncp == p->p_fd->fd_nrdir.ncp) {
			return (1);
		}
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		} else {
			nch.ncp = nch.ncp->nc_parent;
		}
	}

	/*
	 * If the mount point is not visible to the process, but the
	 * process root is in a subdirectory of the mount, return
	 * TRUE anyway.
	 */
	if (p->p_fd->fd_nrdir.mount == mp)
		return (1);

	return (0);
}
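/*
 * Worked example for chroot_visible_mnt() above (illustrative paths):
 * with a process chrooted to /jail and a filesystem mounted on
 * /jail/usr/obj, the walk starts at that mount's root namecache entry
 * and follows nc_parent upwards, crossing onto the covering mount via
 * mnt_ncmounton whenever it reaches a mount's own root entry.  It
 * eventually reaches the /jail entry, which matches fd_nrdir, so the
 * mount is reported visible.  A mount on /usr/obj outside the jail
 * walks up to the real root without ever matching fd_nrdir and is
 * therefore hidden, unless the process root itself lies within that
 * mount, in which case the final fd_nrdir.mount == mp test still
 * reports it visible.
 */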