/*
 * (MPSAFE)
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <sys/thread.h>
#include <sys/thread2.h>

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
int vkernel_enable;
SYSCTL_INT(_vm, OID_AUTO, vkernel_enable, CTLFLAG_RW, &vkernel_enable, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
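
/*
 * Illustrative arithmetic only (assumed values, not measured): with a
 * KvaSize of 1GB and a vm_map_entry of roughly 100 bytes, the init
 * routine below computes (1G / 100) / 100, i.e. on the order of
 * 100,000 map entries per process before the ENOMEM check in
 * kern_mmap() trips, and kern_mmap() further scales that limit by the
 * vmspace reference count.
 */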

static void vmmapentry_rsrc_init (void *);
SYSINIT(vmmersrc, SI_BOOT1_POST, SI_ORDER_ANY, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(void *dummy)
{
	max_proc_mmap = KvaSize / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

/*
 * MPSAFE
 */
int
sys_sbrk(struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * sstk_args(int incr)
 *
 * MPSAFE
 */
int
sys_sstk(struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *		long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 *
 * No requirements; sys_mmap path holds the vm_token
 */
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}
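
	/*
	 * Worked example for the alignment and rounding below
	 * (illustrative only, assuming PAGE_SIZE == 4096): a call with
	 * ulen = 100 and upos = 4196 yields pageoff = 100 and pos
	 * truncated to 4096.  size becomes 100 + 100 = 200 and is then
	 * rounded up to 4096, so the mapping covers the whole page and
	 * the address returned to userland is the mapped base plus 100.
	 */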

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Get a hint of where to map.  It also provides mmap offset
		 * randomization if enabled.
		 */
		addr = vm_map_hint(p, addr, prot);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch(vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}
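
		/*
		 * Illustrative only: the /dev/zero special case below makes
		 *
		 *	fd = open("/dev/zero", O_RDWR);
		 *	p = mmap(NULL, len, prot, MAP_PRIVATE, fd, 0);
		 *
		 * behave like mmap(NULL, len, prot, MAP_ANON, -1, 0).
		 */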
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	/* Token serializes access to vm_map.nentries against vm_mmap */
	lwkt_gettoken(&vm_token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_sysref.refcnt) {
		error = ENOMEM;
		lwkt_reltoken(&vm_token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vm_token);
done:
	if (fp)
		fdrop(fp);

	return (error);
}
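
/*
 * Userland perspective (illustrative only): the handler below funnels
 * into kern_mmap() above.  A typical shared file mapping would be
 *
 *	fd = open("/some/file", O_RDWR);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * where kern_mmap() derives maxprot from the descriptor's FREAD/FWRITE
 * flags, so requesting PROT_WRITE on a read-only descriptor fails with
 * EACCES.
 */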

/*
 * mmap system call handler
 *
 * No requirements.
 */
int
sys_mmap(struct mmap_args *uap)
{
	int error;

	error = kern_mmap(curproc->p_vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->pos, &uap->sysmsg_resultp);

	return (error);
}

/*
 * msync system call handler
 *
 * msync_args(void *addr, size_t len, int flags)
 *
 * No requirements
 */
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * vm_token serializes extracting the address range for size == 0
	 * msyncs with the vm_map_clean call; if the token were not held
	 * across the two calls, an intervening munmap/mmap pair, for example,
	 * could cause msync to occur on a wrong region.
	 */
	lwkt_gettoken(&vm_token);

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		if (rv == FALSE) {
			vm_map_unlock_read(map);
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		addr = entry->start;
		size = entry->end - entry->start;
		vm_map_unlock_read(map);
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);
done:
	lwkt_reltoken(&vm_token);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * munmap system call handler
 *
 * munmap_args(void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* vm_token serializes between the map check and the actual unmap */
	lwkt_gettoken(&vm_token);

	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size,
				     VM_PROT_NONE, FALSE)) {
		lwkt_reltoken(&vm_token);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	lwkt_reltoken(&vm_token);
	return (0);
}
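
/*
 * Note on the wrap checks above (illustrative, 32-bit example): with
 * uap->len = 0xffffffff and a page-unaligned addr, "size += pageoff"
 * overflows and round_page() then yields a value smaller than the
 * original length, which the "size < uap->len" test catches; the
 * separate tmpaddr comparison likewise rejects ranges whose end wraps
 * past the top of the address space.
 */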

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 *
 * No requirements.
 */
int
sys_mprotect(struct mprotect_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size,
			       prot, FALSE)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * minherit system call handler
 *
 * minherit_args(void *addr, size_t len, int inherit)
 *
 * No requirements.
 */
int
sys_minherit(struct minherit_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr,
			       addr + size, inherit)) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
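
/*
 * Illustrative only: a typical advisory call reaching the handler
 * below would be madvise(p, len, MADV_DONTNEED), which lands in
 * vm_map_madvise() after the range is page-rounded; mcontrol()
 * further down is the extended variant that also carries an off_t
 * value.
 */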

/*
 * madvise system call handler
 *
 * madvise_args(void *addr, size_t len, int behav)
 *
 * No requirements.
 */
int
sys_madvise(struct madvise_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav >= MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t)uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, 0);
	return (error);
}

/*
 * mcontrol system call handler
 *
 * mcontrol_args(void *addr, size_t len, int behav, off_t value)
 *
 * No requirements
 */
int
sys_mcontrol(struct mcontrol_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (tmpaddr < (vm_offset_t) uap->addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&p->p_vmspace->vm_map, start, end,
			       uap->behav, uap->value);
	return (error);
}
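
/*
 * Illustrative only: userland typically invokes the handler below as
 *
 *	char vec[npages];
 *	mincore(base, npages * PAGE_SIZE, vec);
 *
 * and then tests vec[i] & MINCORE_INCORE for per-page residency.
 */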

/*
 * mincore system call handler
 *
 * mincore_args(const void *addr, size_t len, char *vec)
 *
 * No requirements
 */
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (end < addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	lwkt_gettoken(&vm_token);
	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	     (current != &map->header) && (current->start < end);
	     current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if (current->maptype != VM_MAPTYPE_NORMAL &&
		    current->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}
		if (current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 *
			 * If we have to check the VM object, only mess
			 * around with normal maps.  Do not mess around
			 * with virtual page tables (XXX).
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (mincoreinfo == 0 &&
			    current->maptype == VM_MAPTYPE_NORMAL) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather
				 * information about it.  spl protection is
				 * required to maintain the object
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 */
				crit_enter();
				m = vm_page_lookup(current->object.vm_object,
						   pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				crit_exit();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make
			 * sure that the byte vector is zeroed for those
			 * skipped entries.  Note that we advance lastvecindex
			 * before writing so that only the skipped slots
			 * (lastvecindex + 1 through vecindex - 1) get zeroed.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	error = 0;
done:
	lwkt_reltoken(&vm_token);
	return (error);
}

/*
 * mlock system call handler
 *
 * mlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_mlock(struct mlock_args *uap)
{
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

	if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

	/*
	 * We do not need to synchronize against other threads updating ucred;
	 * they update p->ucred, which is synchronized into our td_ucred.
	 */
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) {
		return (ENOMEM);
	}
#else
	error = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (error) {
		return (error);
	}
#endif
	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * mlockall(int how)
 *
 * No requirements
 */
int
sys_mlockall(struct mlockall_args *uap)
{
#ifdef _P1003_1B_VISIBLE
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int how = uap->how;
	int rc = KERN_SUCCESS;

	if (((how & MCL_CURRENT) == 0) && ((how & MCL_FUTURE) == 0))
		return (EINVAL);

	rc = priv_check_cred(td->td_ucred, PRIV_ROOT, 0);
	if (rc)
		return (rc);

	vm_map_lock(map);
	do {
		if (how & MCL_CURRENT) {
			for (entry = map->header.next;
			     entry != &map->header;
			     entry = entry->next)
				;

			rc = ENOSYS;
			break;
		}

		if (how & MCL_FUTURE)
			map->flags |= MAP_WIREFUTURE;
	} while(0);
	vm_map_unlock(map);

	return (rc);
#else /* !_P1003_1B_VISIBLE */
	return (ENOSYS);
#endif /* _P1003_1B_VISIBLE */
}
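
/*
 * Illustrative only: given sufficient privilege, mlockall(MCL_FUTURE)
 * succeeds above by setting MAP_WIREFUTURE, so later mmap()s are wired
 * by vm_mmap() at creation time, whereas mlockall(MCL_CURRENT) is not
 * implemented and fails with ENOSYS.
 */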
/*
 * munlockall(void)
 *
 *	Unwire all user-wired map entries, cancel MCL_FUTURE.
 *
 * No requirements
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int rc = KERN_SUCCESS;

	vm_map_lock(map);

	/* Clear MAP_WIREFUTURE to cancel mlockall(MCL_FUTURE) */
	map->flags &= ~MAP_WIREFUTURE;

retry:
	for (entry = map->header.next;
	     entry != &map->header;
	     entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
			continue;

		/*
		 * If we encounter an in-transition entry, we release the
		 * map lock and retry the scan; we do not decrement any
		 * wired_count more than once because we do not touch
		 * any entries with MAP_ENTRY_USER_WIRED not set.
		 *
		 * There is a potential interleaving with concurrent
		 * mlockall()s here -- if we abort a scan, an mlockall()
		 * could start, wire a number of entries before our
		 * current position in the scan, and then stall itself on
		 * this or any other in-transition entry.  If that occurs,
		 * when we resume, we will unwire those entries.
		 */
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);
			goto retry;
		}

		KASSERT(entry->wired_count > 0,
			("wired_count was 0 with USER_WIRED set! %p", entry));

		/* Drop wired count, if it hits zero, unwire the entry */
		entry->eflags &= ~MAP_ENTRY_USER_WIRED;
		entry->wired_count--;
		if (entry->wired_count == 0)
			vm_fault_unwire(map, entry);
	}

	map->timestamp++;
	vm_map_unlock(map);

	return (rc);
}

/*
 * munlock system call handler
 *
 * munlock_args(const void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munlock(struct munlock_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	tmpaddr = addr + size;
	if (tmpaddr < addr)		/* wrap */
		return (EINVAL);

#ifndef pmap_wired_count
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 *
 * No requirements; kern_mmap path holds the vm_token
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t esize;
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&vm_token);

	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&vm_token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 *	 will optimize it out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&vm_token);
		return (EINVAL);
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&vm_token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&vm_token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&vm_token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			handle = (void *)(intptr_t)vp->v_rdev;
			object = dev_pager_alloc(handle, objsize, prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&vm_token);
				return(EINVAL);
			}
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  Mmappable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&vm_token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&vm_token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
				 fitit, VM_MAPTYPE_VPAGETABLE,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
				 fitit, VM_MAPTYPE_NORMAL,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/* If a process has marked all future mappings for wiring, do so */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_unwire(map, *addr, *addr + size, FALSE);

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&vm_token);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}