/*
 * (MPSAFE)
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 * $DragonFly: src/sys/vm/vm_mmap.c,v 1.39 2007/04/30 07:18:57 dillon Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <sys/thread.h>
#include <sys/thread2.h>

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
int vkernel_enable;
SYSCTL_INT(_vm, OID_AUTO, vkernel_enable, CTLFLAG_RW, &vkernel_enable, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough such that multi-threaded processes are not unduly inconvenienced.
 */

static void vmmapentry_rsrc_init (void *);
SYSINIT(vmmersrc, SI_BOOT1_POST, SI_ORDER_ANY, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(void *dummy)
{
	max_proc_mmap = KvaSize / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

/*
 * MPSAFE
 */
int
sys_sbrk(struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * sstk_args(int incr)
 *
 * MPSAFE
 */
int
sys_sstk(struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *		long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
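 *
 * For example (illustrative only, assuming 4K pages): mmap(NULL, len,
 * prot, flags, fd, 0x1234) establishes the mapping at file offset 0x1000,
 * and the pointer returned to the caller is bumped up by the same 0x234
 * page offset that was trimmed from the file offset.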
 *
 * No requirements; sys_mmap path holds the vm_token
 */
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Set a reasonable start point for the hint if it was
		 * not specified or if it falls within the heap space.
		 * Hinted mmap()s do not allocate out of the heap space.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
			addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch (vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	/* Token serializes access to vm_map.nentries against vm_mmap */
	lwkt_gettoken(&vm_token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
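	 *
	 * For example, a map shared by N rfork()ed processes is allowed
	 * roughly N * max_proc_mmap vm_map_entry structures before mmap()
	 * starts failing with ENOMEM.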
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_sysref.refcnt) {
		error = ENOMEM;
		lwkt_reltoken(&vm_token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vm_token);
done:
	if (fp)
		fdrop(fp);

	return (error);
}

/*
 * mmap system call handler
 *
 * No requirements.
 */
int
sys_mmap(struct mmap_args *uap)
{
	int error;

	error = kern_mmap(curproc->p_vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->pos, &uap->sysmsg_resultp);

	return (error);
}

/*
 * msync system call handler
 *
 * msync_args(void *addr, size_t len, int flags)
 *
 * No requirements
 */
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * vm_token serializes extracting the address range for size == 0
	 * msyncs with the vm_map_clean call; if the token were not held
	 * across the two calls, an intervening munmap/mmap pair, for example,
	 * could cause msync to occur on a wrong region.
	 */
	lwkt_gettoken(&vm_token);

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		if (rv == FALSE) {
			vm_map_unlock_read(map);
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		addr = entry->start;
		size = entry->end - entry->start;
		vm_map_unlock_read(map);
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);
done:
	lwkt_reltoken(&vm_token);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * munmap system call handler
 *
 * munmap_args(void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* vm_token serializes between the map check and the actual unmap */
	lwkt_gettoken(&vm_token);

	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size,
				     VM_PROT_NONE, FALSE)) {
		lwkt_reltoken(&vm_token);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	lwkt_reltoken(&vm_token);
	return (0);
}

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 *
 * No requirements.
 */
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
			       prot, FALSE)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * minherit system call handler
 *
 * minherit_args(void *addr, size_t len, int inherit)
 *
 * No requirements.
 */
int
sys_minherit(struct minherit_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr,
			       addr + size, inherit)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * madvise system call handler
 *
 * madvise_args(void *addr, size_t len, int behav)
 *
 * No requirements.
 */
int
sys_madvise(struct madvise_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav >= MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t)uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, 0);
	return (error);
}

/*
 * mcontrol system call handler
 *
 * mcontrol_args(void *addr, size_t len, int behav, off_t value)
 *
 * No requirements
 */
int
sys_mcontrol(struct mcontrol_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t) uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, uap->value);
	return (error);
}

/*
 * mincore system call handler
 *
 * mincore_args(const void *addr, size_t len, char *vec)
 *
 * No requirements
 */
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (end < addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	lwkt_gettoken(&vm_token);
	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	     (current != &map->header) && (current->start < end);
	     current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if (current->maptype != VM_MAPTYPE_NORMAL &&
		    current->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}
		if (current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 *
			 * If we have to check the VM object, only mess
			 * around with normal maps.  Do not mess around
			 * with virtual page tables (XXX).
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (mincoreinfo == 0 &&
			    current->maptype == VM_MAPTYPE_NORMAL) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather
				 * information about it.  spl protection is
				 * required to maintain the object
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 */
				crit_enter();
				m = vm_page_lookup(current->object.vm_object,
						   pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				crit_exit();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make
			 * sure that the byte vector is zeroed for those
			 * skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	error = 0;
done:
	lwkt_reltoken(&vm_token);
	return (error);
}

/*
 * mlock system call handler
 *
 * mlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_mlock(struct mlock_args *uap)
{
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

	if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

	/*
	 * We do not need to synchronize against other threads updating ucred;
	 * they update p->ucred, which we synchronize into td_ucred ourselves.
	 */
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) {
		return (ENOMEM);
	}
#else
	error = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (error) {
		return (error);
	}
#endif
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * mlockall_args(int how)
 *
 * Dummy routine, doesn't actually do anything.
 *
 * No requirements
 */
int
sys_mlockall(struct mlockall_args *uap)
{
	return (ENOSYS);
}

/*
 * munlockall_args(void)
 *
 * Dummy routine, doesn't actually do anything.
 *
 * No requirements
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	return (ENOSYS);
}

/*
 * munlock system call handler
 *
 * munlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munlock(struct munlock_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	tmpaddr = addr + size;
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

#ifndef pmap_wired_count
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 *
 * No requirements; kern_mmap path holds the vm_token
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t esize;
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&vm_token);

	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize them out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&vm_token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize them out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&vm_token);
		return (EINVAL);
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&vm_token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&vm_token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&vm_token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			handle = (void *)(intptr_t)vp->v_rdev;
			object = dev_pager_alloc(handle, objsize, prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&vm_token);
				return(EINVAL);
			}
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  Mmappable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&vm_token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&vm_token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
				 fitit, VM_MAPTYPE_VPAGETABLE,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
				 fitit, VM_MAPTYPE_NORMAL,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&vm_token);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}