/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_fp.c,v 1.11 2005/07/13 01:38:50 dillon Exp $
 */

/*
 * Direct file pointer API functions for in-kernel operations on files.  These
 * functions provide an open/read/write/close-like interface within the kernel
 * for operating on files that are not necessarily associated with processes
 * and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>

typedef struct file *file_t;

/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	NOTE! O_ROOTCRED not quite working yet, vn_open() asserts that the
 *	cred must match the process's cred. XXX
 *
 *	NOTE! When fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
        struct nlookupdata nd;
        struct thread *td;
        struct file *fp;
        int error;

        if ((error = falloc(NULL, fpp, NULL)) != 0)
                return (error);
        fp = *fpp;
        td = curthread;
        if (td->td_proc) {
                if ((flags & O_ROOTCRED) == 0)
                        fsetcred(fp, td->td_proc->p_ucred);
        }
        error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
        flags = FFLAGS(flags);
        if (error == 0)
                error = vn_open(&nd, fp, flags, mode);
        nlookup_done(&nd);
        if (error) {
                fdrop(fp, td);
                *fpp = NULL;
        }
        return (error);
}
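
/*
 * Illustrative sketch only (not compiled in): how a kernel consumer
 * might use this API for an open/read/close cycle.  The path, buffer
 * size, and abbreviated error handling below are hypothetical.
 *
 *	struct file *fp;
 *	char buf[128];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_open("/etc/motd", O_RDONLY, 0, &fp);
 *	if (error == 0) {
 *		error = fp_read(fp, buf, sizeof(buf), &res, 0);
 *		fp_close(fp);
 *	}
 */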

/*
 * fp_vpopen():	convert a vnode to a file pointer, call VOP_OPEN() on the
 * vnode.  The vnode must be ref'd and locked.
 *
 * On success the vnode's ref is inherited by the file pointer and the caller
 * should not vrele() it, and the vnode is unlocked.
 *
 * On failure the vnode remains locked and ref'd and the caller is responsible
 * for vput()ing it.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
        struct thread *td;
        struct file *fp;
        int vmode;
        int error;

        td = curthread;

        /*
         * Vnode checks (from vn_open())
         */
        if (vp->v_type == VLNK) {
                error = EMLINK;
                goto bad2;
        }
        if (vp->v_type == VSOCK) {
                error = EOPNOTSUPP;
                goto bad2;
        }
        flags = FFLAGS(flags);
        vmode = 0;
        if (flags & (FWRITE | O_TRUNC)) {
                if (vp->v_type == VDIR) {
                        error = EISDIR;
                        goto bad2;
                }
                error = vn_writechk(vp);
                if (error)
                        goto bad2;
                vmode |= VWRITE;
        }
        if (flags & FREAD)
                vmode |= VREAD;
        if (vmode) {
                error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred, td);
                if (error)
                        goto bad2;
        }

        /*
         * File pointer setup
         */
        if ((error = falloc(NULL, fpp, NULL)) != 0)
                goto bad2;
        fp = *fpp;
        if ((flags & O_ROOTCRED) == 0 && td->td_proc)
                fsetcred(fp, td->td_proc->p_ucred);
        fp->f_data = (caddr_t)vp;
        fp->f_flag = flags;
        fp->f_ops = &vnode_fileops;
        fp->f_type = DTYPE_VNODE;

        error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, fp, td);
        if (error)
                goto bad1;

        /*
         * Make sure that a VM object is created for VMIO support.
         */
        if (vn_canvmio(vp) == TRUE) {
                if ((error = vfs_object_create(vp, td)) != 0) {
                        VOP_CLOSE(vp, flags, td);
                        goto bad1;
                }
        }

        /*
         * All done, update v_writecount now that no more errors can occur.
         */
        if (flags & FWRITE)
                vp->v_writecount++;
        VOP_UNLOCK(vp, 0, td);
        return (0);
bad1:
        fp->f_ops = &badfileops;        /* open failed, don't close */
        fp->f_data = NULL;
        fdrop(fp, td);
        /* leave the vnode intact; it remains locked and ref'd for the caller */
bad2:
        *fpp = NULL;
        return (error);
}
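
/*
 * Illustrative sketch only: wrapping a vnode the caller already holds.
 * 'vp' is assumed to be ref'd and exclusively locked; on success its
 * ref moves to the file pointer, on failure the caller still owns it.
 *
 *	struct file *fp;
 *
 *	if (fp_vpopen(vp, O_RDONLY, &fp) == 0)
 *		fp_close(fp);		(also releases the vnode ref)
 *	else
 *		vput(vp);		(still locked and ref'd)
 */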

/*
 * fp_*read() is meant to operate like the normal descriptor based syscalls
 * would.  Note that if 'buf' points to user memory a UIO_USERSPACE
 * transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
        struct uio auio;
        struct iovec aiov;
        size_t count;
        int error;

        if (res)
                *res = 0;
        if (nbytes > INT_MAX)
                return (EINVAL);
        bzero(&auio, sizeof(auio));
        aiov.iov_base = (caddr_t)buf;
        aiov.iov_len = nbytes;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = offset;
        auio.uio_resid = nbytes;
        auio.uio_rw = UIO_READ;
        if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
                auio.uio_segflg = UIO_USERSPACE;
        else
                auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = curthread;

        count = nbytes;
        error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
        if (error) {
                if (auio.uio_resid != nbytes &&
                    (error == ERESTART || error == EINTR ||
                     error == EWOULDBLOCK)) {
                        error = 0;
                }
        }
        count -= auio.uio_resid;
        if (res)
                *res = count;
        return (error);
}

int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res, int all)
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int lastresid;

        if (res)
                *res = 0;
        if (nbytes > INT_MAX)
                return (EINVAL);
        bzero(&auio, sizeof(auio));
        aiov.iov_base = (caddr_t)buf;
        aiov.iov_len = nbytes;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = 0;
        auio.uio_resid = nbytes;
        auio.uio_rw = UIO_READ;
        if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
                auio.uio_segflg = UIO_USERSPACE;
        else
                auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = curthread;

        /*
         * If all is false call fo_read() once.
         * If all is true we attempt to read the entire request.  We have to
         * break out of the loop if an unrecoverable error or EOF occurs.
         */
        do {
                lastresid = auio.uio_resid;
                error = fo_read(fp, &auio, fp->f_cred, 0, auio.uio_td);
        } while (all && auio.uio_resid &&
                 ((error == 0 && auio.uio_resid != lastresid) ||
                  error == ERESTART || error == EINTR));
        if (all && error == 0 && auio.uio_resid)
                error = ESPIPE;

        /*
         * If an error occurred but some data was read, silently forget the
         * error.  However, if this is a non-blocking descriptor and 'all'
         * was specified, return an error even if some data was read (this
         * is considered a bug in the caller for using an illegal combination
         * of 'all' and a non-blocking descriptor).
         */
        if (error) {
                if (auio.uio_resid != nbytes) {
                        if (error == ERESTART || error == EINTR)
                                error = 0;
                        if (error == EWOULDBLOCK && all == 0)
                                error = 0;
                }
        }
        if (res)
                *res = nbytes - auio.uio_resid;
        return (error);
}
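
/*
 * Illustrative sketch only: fp_pread() reads at an explicit offset,
 * while fp_read() with all=1 loops until the whole request is
 * satisfied and returns ESPIPE on premature EOF.  'fp' and the buffer
 * size are hypothetical.
 *
 *	char hdr[512];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_read(fp, hdr, sizeof(hdr), &res, 1);
 *	if (error == ESPIPE)
 *		...			(file was shorter than 512 bytes)
 */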

int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
        struct uio auio;
        struct iovec aiov;
        size_t count;
        int error;

        if (res)
                *res = 0;
        if (nbytes > INT_MAX)
                return (EINVAL);
        bzero(&auio, sizeof(auio));
        aiov.iov_base = (caddr_t)buf;
        aiov.iov_len = nbytes;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = offset;
        auio.uio_resid = nbytes;
        auio.uio_rw = UIO_WRITE;
        if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
                auio.uio_segflg = UIO_USERSPACE;
        else
                auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = curthread;

        count = nbytes;
        error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
        if (error) {
                if (auio.uio_resid != nbytes &&
                    (error == ERESTART || error == EINTR ||
                     error == EWOULDBLOCK)) {
                        error = 0;
                }
        }
        count -= auio.uio_resid;
        if (res)
                *res = count;
        return (error);
}

int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res)
{
        struct uio auio;
        struct iovec aiov;
        size_t count;
        int error;

        if (res)
                *res = 0;
        if (nbytes > INT_MAX)
                return (EINVAL);
        bzero(&auio, sizeof(auio));
        aiov.iov_base = (caddr_t)buf;
        aiov.iov_len = nbytes;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = 0;
        auio.uio_resid = nbytes;
        auio.uio_rw = UIO_WRITE;
        if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
                auio.uio_segflg = UIO_USERSPACE;
        else
                auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_td = curthread;

        count = nbytes;
        error = fo_write(fp, &auio, fp->f_cred, 0, auio.uio_td);
        if (error) {
                if (auio.uio_resid != nbytes &&
                    (error == ERESTART || error == EINTR ||
                     error == EWOULDBLOCK)) {
                        error = 0;
                }
        }
        count -= auio.uio_resid;
        if (res)
                *res = count;
        return (error);
}

int
fp_stat(file_t fp, struct stat *ub)
{
        int error;

        error = fo_stat(fp, ub, curthread);
        return (error);
}
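
/*
 * Illustrative sketch only: fp_pwrite() writes at an explicit offset
 * without moving the file position; fp_stat() fills in a struct stat
 * just like fstat(2) would.  'fp', 'data', and 'len' are hypothetical.
 *
 *	struct stat st;
 *	ssize_t res;
 *
 *	if (fp_pwrite(fp, data, len, 0, &res) == 0 &&
 *	    fp_stat(fp, &st) == 0)
 *		KKASSERT(st.st_size >= res);
 */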

/*
 * non-anonymous, non-stack descriptor mappings only!
 *
 * This routine mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
        off_t pos, void **resp)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        vm_size_t pageoff;
        vm_prot_t maxprot;
        vm_offset_t addr;
        void *handle;
        int error;
        vm_object_t obj;
        struct vmspace *vms = p->p_vmspace;
        struct vnode *vp;
        int disablexworkaround;

        prot &= VM_PROT_ALL;

        if ((ssize_t)size < 0 || (flags & MAP_ANON))
                return (EINVAL);

        pageoff = (pos & PAGE_MASK);
        pos -= pageoff;

        /* Adjust size for rounding (on both ends). */
        size += pageoff;                        /* low end... */
        size = (vm_size_t)round_page(size);     /* hi end */
        addr = (vm_offset_t)addr_arg;

        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (flags & MAP_FIXED) {
                /*
                 * The specified address must have the same remainder
                 * as the file offset taken modulo PAGE_SIZE, so it
                 * should be aligned after adjustment by pageoff.
                 */
                addr -= pageoff;
                if (addr & PAGE_MASK)
                        return (EINVAL);
                /* Address range must be all in user VM space. */
                if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
                        return (EINVAL);
#ifndef i386
                if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
                        return (EINVAL);
#endif
                if (addr + size < addr)
                        return (EINVAL);
        } else if (addr == 0 ||
            (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
             addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))
        ) {
                /*
                 * XXX for non-fixed mappings where no hint is provided or
                 * the hint would fall in the potential heap space,
                 * place it after the end of the largest possible heap.
                 *
                 * There should really be a pmap call to determine a reasonable
                 * location.
                 */
                addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
        }

        /*
         * Mapping file, get fp for validation.  Obtain vnode and make
         * sure it is of appropriate type.
         */
        if (fp->f_type != DTYPE_VNODE)
                return (EINVAL);

        /*
         * POSIX shared-memory objects are defined to have
         * kernel persistence, and are not defined to support
         * read(2)/write(2) -- or even open(2).  Thus, we can
         * use MAP_NOSYNC to trade on-disk coherence for speed.
         * The shm_open(3) library routine turns on the FPOSIXSHM
         * flag to request this behavior.
         */
        if (fp->f_flag & FPOSIXSHM)
                flags |= MAP_NOSYNC;
        vp = (struct vnode *)fp->f_data;
        if (vp->v_type != VREG && vp->v_type != VCHR)
                return (EINVAL);

        /*
         * Get the proper underlying object
         */
        if (vp->v_type == VREG) {
                if (VOP_GETVOBJECT(vp, &obj) != 0)
                        return (EINVAL);
                vp = (struct vnode *)obj->handle;
        }

        /*
         * XXX hack to handle use of /dev/zero to map anon memory (ala
         * SunOS).
         */
        if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
                handle = NULL;
                maxprot = VM_PROT_ALL;
                flags |= MAP_ANON;
                pos = 0;
        } else {
                /*
                 * cdevs do not provide private mappings of any kind.
                 *
                 * However, for the XIG X server to continue to work,
                 * we should allow the superuser to do it anyway.
                 * We only allow it at securelevel < 1.
                 * (Because the XIG X server writes directly to video
                 * memory via /dev/mem, it should never work at any
                 * other securelevel.)
                 * XXX this will have to go
                 */
                if (securelevel >= 1)
                        disablexworkaround = 1;
                else
                        disablexworkaround = suser(td);
                if (vp->v_type == VCHR && disablexworkaround &&
                    (flags & (MAP_PRIVATE|MAP_COPY))) {
                        error = EINVAL;
                        goto done;
                }
                /*
                 * Ensure that file and memory protections are
                 * compatible.  Note that we only worry about
                 * writability if mapping is shared; in this case,
                 * current and max prot are dictated by the open file.
                 * XXX use the vnode instead?  Problem is: what
                 * credentials do we use for determination?  What if
                 * proc does a setuid?
                 */
                maxprot = VM_PROT_EXECUTE;      /* ??? */
                if (fp->f_flag & FREAD) {
                        maxprot |= VM_PROT_READ;
                } else if (prot & PROT_READ) {
                        error = EACCES;
                        goto done;
                }
                /*
                 * If we are sharing potential changes (either via
                 * MAP_SHARED or via the implicit sharing of character
                 * device mappings), and we are trying to get write
                 * permission although we opened it without asking
                 * for it, bail out.  Check for superuser, only if
                 * we're at securelevel < 1, to allow the XIG X server
                 * to continue to work.
                 */
                if ((flags & MAP_SHARED) != 0 ||
                    (vp->v_type == VCHR && disablexworkaround)
                ) {
                        if ((fp->f_flag & FWRITE) != 0) {
                                struct vattr va;
                                if ((error = VOP_GETATTR(vp, &va, td))) {
                                        goto done;
                                }
                                if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
                                        maxprot |= VM_PROT_WRITE;
                                } else if (prot & PROT_WRITE) {
                                        error = EPERM;
                                        goto done;
                                }
                        } else if ((prot & PROT_WRITE) != 0) {
                                error = EACCES;
                                goto done;
                        }
                } else {
                        maxprot |= VM_PROT_WRITE;
                }
                handle = (void *)vp;
        }
        error = vm_mmap(&vms->vm_map, &addr, size, prot,
                        maxprot, flags, handle, pos);
        if (error == 0 && resp)         /* hand back the chosen address */
                *resp = (void *)addr;
done:
        return (error);
}
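
/*
 * Illustrative sketch only: map the first page of an open regular
 * file read-only into the current process's address space.  Passing a
 * NULL address hint lets the kernel pick a location, returned via
 * 'base'; 'fp' is hypothetical.
 *
 *	void *base;
 *	int error;
 *
 *	error = fp_mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED,
 *			fp, 0, &base);
 */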

int
fp_close(file_t fp)
{
        return (fdrop(fp, curthread));
}

int
fp_shutdown(file_t fp, int how)
{
        return (fo_shutdown(fp, how, curthread));
}
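
/*
 * Illustrative sketch only: orderly teardown.  fp_shutdown() is only
 * meaningful for objects whose fileops implement it (e.g. sockets);
 * for plain vnode-backed files fp_close() alone suffices.  'fp' is
 * hypothetical.
 *
 *	fp_shutdown(fp, SHUT_RDWR);
 *	fp_close(fp);
 */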