/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD: src/sys/vm/vm_mmap.c,v 1.108.2.6 2002/07/02 20:06:19 dillon Exp $
 * $DragonFly: src/sys/vm/vm_mmap.c,v 1.9 2003/07/26 22:10:02 rob Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel running out of resources
 * if attacked from a compromised user account but generous enough such that
 * multi-threaded processes are not unduly inconvenienced.
 */

static void vmmapentry_rsrc_init __P((void *));
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(void *dummy)
{
        max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
        max_proc_mmap /= 100;
}
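
/*
 * For a rough sense of scale (illustrative numbers only, not measured and
 * not from the original source): with a vm_kmem_size of 128MB and a
 * vm_map_entry of roughly 64 bytes, the division above yields a limit of
 * about 20000 map entries per process share of the map.
 */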

/* ARGSUSED */
int
sbrk(struct sbrk_args *uap)
{
        /* Not yet implemented */
        return (EOPNOTSUPP);
}

/*
 * sstk_args(int incr)
 */
/* ARGSUSED */
int
sstk(struct sstk_args *uap)
{
        /* Not yet implemented */
        return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)

/*
 * getpagesize_args(int dummy)
 */
/* ARGSUSED */
int
ogetpagesize(struct getpagesize_args *uap)
{
        uap->lmsg.u.ms_result = PAGE_SIZE;
        return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */


/*
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *	     long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
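
/*
 * Illustrative example (not from the original source): with 4K pages, a
 * userland call such as
 *
 *	mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x12345)
 *
 * has pageoff = 0x345, so the kernel maps starting at file offset 0x12000,
 * rounds the length 100 + 0x345 up to one page, and returns the allocated
 * page address plus 0x345.
 */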

int
mmap(struct mmap_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct filedesc *fdp = p->p_fd;
        struct file *fp = NULL;
        struct vnode *vp;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_prot_t prot, maxprot;
        void *handle;
        int flags, error;
        int disablexworkaround;
        off_t pos;
        struct vmspace *vms = p->p_vmspace;
        vm_object_t obj;

        KKASSERT(p);

        addr = (vm_offset_t) uap->addr;
        size = uap->len;
        prot = uap->prot & VM_PROT_ALL;
        flags = uap->flags;
        pos = uap->pos;

        /* make sure mapping fits into numeric range etc */
        if ((ssize_t) uap->len < 0 ||
            ((flags & MAP_ANON) && uap->fd != -1))
                return (EINVAL);

        if (flags & MAP_STACK) {
                if ((uap->fd != -1) ||
                    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
                        return (EINVAL);
                flags |= MAP_ANON;
                pos = 0;
        }

        /*
         * Align the file position to a page boundary,
         * and save its page offset component.
         */
        pageoff = (pos & PAGE_MASK);
        pos -= pageoff;

        /* Adjust size for rounding (on both ends). */
        size += pageoff;                        /* low end... */
        size = (vm_size_t) round_page(size);    /* hi end */

        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (flags & MAP_FIXED) {
                /*
                 * The specified address must have the same remainder
                 * as the file offset taken modulo PAGE_SIZE, so it
                 * should be aligned after adjustment by pageoff.
                 */
                addr -= pageoff;
                if (addr & PAGE_MASK)
                        return (EINVAL);
                /* Address range must be all in user VM space. */
                if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
                        return (EINVAL);
#ifndef i386
                if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
                        return (EINVAL);
#endif
                if (addr + size < addr)
                        return (EINVAL);
        }
        /*
         * XXX for non-fixed mappings where no hint is provided or
         * the hint would fall in the potential heap space,
         * place it after the end of the largest possible heap.
         *
         * There should really be a pmap call to determine a reasonable
         * location.
         */
        else if (addr == 0 ||
            (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
             addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
                addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

        if (flags & MAP_ANON) {
                /*
                 * Mapping blank space is trivial.
                 */
                handle = NULL;
                maxprot = VM_PROT_ALL;
                pos = 0;
        } else {
                /*
                 * Mapping file, get fp for validation.  Obtain vnode and make
                 * sure it is of appropriate type.
                 */
                if (((unsigned) uap->fd) >= fdp->fd_nfiles ||
                    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
                        return (EBADF);
                if (fp->f_type != DTYPE_VNODE)
                        return (EINVAL);
                /*
                 * POSIX shared-memory objects are defined to have
                 * kernel persistence, and are not defined to support
                 * read(2)/write(2) -- or even open(2).  Thus, we can
                 * use MAP_ASYNC to trade on-disk coherence for speed.
                 * The shm_open(3) library routine turns on the FPOSIXSHM
                 * flag to request this behavior.
                 */
                if (fp->f_flag & FPOSIXSHM)
                        flags |= MAP_NOSYNC;
                vp = (struct vnode *) fp->f_data;
                if (vp->v_type != VREG && vp->v_type != VCHR)
                        return (EINVAL);
                if (vp->v_type == VREG) {
                        /*
                         * Get the proper underlying object
                         */
                        if (VOP_GETVOBJECT(vp, &obj) != 0)
                                return (EINVAL);
                        vp = (struct vnode *)obj->handle;
                }

                /*
                 * don't let the descriptor disappear on us if we block
                 */
                fhold(fp);

                /*
                 * XXX hack to handle use of /dev/zero to map anon memory (ala
                 * SunOS).
                 */
                if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
                        handle = NULL;
                        maxprot = VM_PROT_ALL;
                        flags |= MAP_ANON;
                        pos = 0;
                } else {
                        /*
                         * cdevs do not provide private mappings of any kind.
                         *
                         * However, for the XIG X server to continue to work,
                         * we should allow the superuser to do it anyway.
                         * We only allow it at securelevel < 1.
                         * (Because the XIG X server writes directly to video
                         * memory via /dev/mem, it should never work at any
                         * other securelevel.)
                         * XXX this will have to go
                         */
                        if (securelevel >= 1)
                                disablexworkaround = 1;
                        else
                                disablexworkaround = suser(td);
                        if (vp->v_type == VCHR && disablexworkaround &&
                            (flags & (MAP_PRIVATE|MAP_COPY))) {
                                error = EINVAL;
                                goto done;
                        }
                        /*
                         * Ensure that file and memory protections are
                         * compatible.  Note that we only worry about
                         * writability if mapping is shared; in this case,
                         * current and max prot are dictated by the open file.
                         * XXX use the vnode instead?  Problem is: what
                         * credentials do we use for determination?  What if
                         * proc does a setuid?
                         */
                        maxprot = VM_PROT_EXECUTE;      /* ??? */
                        if (fp->f_flag & FREAD) {
                                maxprot |= VM_PROT_READ;
                        } else if (prot & PROT_READ) {
                                error = EACCES;
                                goto done;
                        }
                        /*
                         * If we are sharing potential changes (either via
                         * MAP_SHARED or via the implicit sharing of character
                         * device mappings), and we are trying to get write
                         * permission although we opened it without asking
                         * for it, bail out.  Check for superuser, only if
                         * we're at securelevel < 1, to allow the XIG X server
                         * to continue to work.
                         */
                        if ((flags & MAP_SHARED) != 0 ||
                            (vp->v_type == VCHR && disablexworkaround)) {
                                if ((fp->f_flag & FWRITE) != 0) {
                                        struct vattr va;
                                        if ((error = VOP_GETATTR(vp, &va, td))) {
                                                goto done;
                                        }
                                        if ((va.va_flags &
                                            (IMMUTABLE|APPEND)) == 0) {
                                                maxprot |= VM_PROT_WRITE;
                                        } else if (prot & PROT_WRITE) {
                                                error = EPERM;
                                                goto done;
                                        }
                                } else if ((prot & PROT_WRITE) != 0) {
                                        error = EACCES;
                                        goto done;
                                }
                        } else {
                                maxprot |= VM_PROT_WRITE;
                        }
                        handle = (void *)vp;
                }
        }

        /*
         * Do not allow more than a certain number of vm_map_entry structures
         * per process.  Scale with the number of rforks sharing the map
         * to make the limit reasonable for threads.
         */
        if (max_proc_mmap &&
            vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
                error = ENOMEM;
                goto done;
        }

        error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
            flags, handle, pos);
        if (error == 0)
                uap->lmsg.u.ms_resultp = (void *)(addr + pageoff);
done:
        if (fp)
                fdrop(fp, td);
        return (error);
}

#ifdef COMPAT_43
/*
 * ommap_args(caddr_t addr, int len, int prot, int flags, int fd, long pos)
 */
int
ommap(struct ommap_args *uap)
{
        struct mmap_args nargs;
        static const char cvtbsdprot[8] = {
                0,
                PROT_EXEC,
                PROT_WRITE,
                PROT_EXEC | PROT_WRITE,
                PROT_READ,
                PROT_EXEC | PROT_READ,
                PROT_WRITE | PROT_READ,
                PROT_EXEC | PROT_WRITE | PROT_READ,
        };
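
        /*
         * Note (added for clarity): the table above decodes the old 4.3BSD
         * protection bits, where 0x1 = execute, 0x2 = write and 0x4 = read,
         * into the modern PROT_* values.  For example, an old prot of 0x6
         * (read | write) indexes entry 6, PROT_WRITE | PROT_READ.
         */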

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

        nargs.addr = uap->addr;
        nargs.len = uap->len;
        nargs.prot = cvtbsdprot[uap->prot & 0x7];
        nargs.flags = 0;
        if (uap->flags & OMAP_ANON)
                nargs.flags |= MAP_ANON;
        if (uap->flags & OMAP_COPY)
                nargs.flags |= MAP_COPY;
        if (uap->flags & OMAP_SHARED)
                nargs.flags |= MAP_SHARED;
        else
                nargs.flags |= MAP_PRIVATE;
        if (uap->flags & OMAP_FIXED)
                nargs.flags |= MAP_FIXED;
        if (uap->flags & OMAP_INHERIT)
                nargs.flags |= MAP_INHERIT;
        nargs.fd = uap->fd;
        nargs.pos = uap->pos;
        return (mmap(&nargs));
}
#endif /* COMPAT_43 */


/*
 * msync_args(void *addr, int len, int flags)
 */
int
msync(struct msync_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        int flags;
        vm_map_t map;
        int rv;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;
        flags = uap->flags;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
                return (EINVAL);

        map = &p->p_vmspace->vm_map;

        /*
         * XXX Gak!  If size is zero we are supposed to sync "all modified
         * pages within the region containing addr".  Unfortunately, we don't
         * really keep track of individual mmaps so we approximate by flushing
         * the range of the map entry containing addr.  This can be incorrect
         * if the region splits or is coalesced with a neighbor.
         */
        if (size == 0) {
                vm_map_entry_t entry;

                vm_map_lock_read(map);
                rv = vm_map_lookup_entry(map, addr, &entry);
                vm_map_unlock_read(map);
                if (rv == FALSE)
                        return (EINVAL);
                addr = entry->start;
                size = entry->end - entry->start;
        }

        /*
         * Clean the pages and interpret the return value.
         */
        rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
            (flags & MS_INVALIDATE) != 0);

        switch (rv) {
        case KERN_SUCCESS:
                break;
        case KERN_INVALID_ADDRESS:
                return (EINVAL);        /* Sun returns ENOMEM? */
        case KERN_FAILURE:
                return (EIO);
        default:
                return (EINVAL);
        }

        return (0);
}
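
/*
 * Illustrative usage (not from the original source): a zero-length call
 * such as msync(base, 0, 0) synchronously flushes the whole map entry
 * containing base, per the size == 0 approximation above, while passing
 * MS_ASYNC instead starts the writeback without waiting for it to finish.
 */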

/*
 * munmap_args(void *addr, size_t len)
 */
int
munmap(struct munmap_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_map_t map;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        if (size == 0)
                return (0);

        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
                return (EINVAL);
#ifndef i386
        if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
                return (EINVAL);
#endif
        map = &p->p_vmspace->vm_map;
        /*
         * Make sure entire range is allocated.
         */
        if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
                return (EINVAL);
        /* returns nothing but KERN_SUCCESS anyway */
        (void) vm_map_remove(map, addr, addr + size);
        return (0);
}

#if 0
void
munmapfd(struct proc *p, int fd)
{
        /*
         * XXX should unmap any regions mapped to this file
         */
        p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}
#endif

/*
 * mprotect_args(const void *addr, size_t len, int prot)
 */
int
mprotect(struct mprotect_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_prot_t prot;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;
        prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
        if (prot & VM_PROT_READ)
                prot |= VM_PROT_EXECUTE;
#endif

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
            FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        }
        return (EINVAL);
}
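
/*
 * Note (added for clarity): on platforms that define VM_PROT_READ_IS_EXEC,
 * such as i386, where the MMU cannot grant read permission without also
 * granting execute, mprotect(addr, len, PROT_READ) above silently behaves
 * as PROT_READ | PROT_EXEC.  vm_mmap() below applies the same widening.
 */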

/*
 * minherit_args(void *addr, size_t len, int inherit)
 */
int
minherit(struct minherit_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_inherit_t inherit;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;
        inherit = uap->inherit;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        switch (vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
            inherit)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        }
        return (EINVAL);
}

/*
 * madvise_args(void *addr, size_t len, int behav)
 */
/* ARGSUSED */
int
madvise(struct madvise_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t start, end;

        /*
         * Check for illegal behavior
         */
        if (uap->behav < 0 || uap->behav > MADV_CORE)
                return (EINVAL);
        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (VM_MAXUSER_ADDRESS > 0 &&
            ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
                return (EINVAL);
#ifndef i386
        if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
                return (EINVAL);
#endif
        if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
                return (EINVAL);

        /*
         * Since this routine is only advisory, we default to conservative
         * behavior.
         */
        start = trunc_page((vm_offset_t) uap->addr);
        end = round_page((vm_offset_t) uap->addr + uap->len);

        if (vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav))
                return (EINVAL);
        return (0);
}
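
/*
 * Illustrative usage (not from the original source): the behav range check
 * above accepts the standard advice values between MADV_NORMAL (0) and
 * MADV_CORE, so e.g. madvise(base, len, MADV_WILLNEED) asks the VM system
 * to bring the range in ahead of use, while madvise(base, len, MADV_FREE)
 * tells it the contents may be discarded rather than paged out.
 */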

/*
 * mincore_args(const void *addr, size_t len, char *vec)
 */
/* ARGSUSED */
int
mincore(struct mincore_args *uap)
{
        struct proc *p = curproc;
        vm_offset_t addr, first_addr;
        vm_offset_t end, cend;
        pmap_t pmap;
        vm_map_t map;
        char *vec;
        int error;
        int vecindex, lastvecindex;
        vm_map_entry_t current;
        vm_map_entry_t entry;
        int mincoreinfo;
        unsigned int timestamp;

        /*
         * Make sure that the addresses presented are valid for user
         * mode.
         */
        first_addr = addr = trunc_page((vm_offset_t) uap->addr);
        end = addr + (vm_size_t)round_page(uap->len);
        if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
                return (EINVAL);
        if (end < addr)
                return (EINVAL);

        /*
         * Address of byte vector
         */
        vec = uap->vec;

        map = &p->p_vmspace->vm_map;
        pmap = vmspace_pmap(p->p_vmspace);

        vm_map_lock_read(map);
RestartScan:
        timestamp = map->timestamp;

        if (!vm_map_lookup_entry(map, addr, &entry))
                entry = entry->next;

        /*
         * Do this on a map entry basis so that if the pages are not
         * in the current process's address space, we can easily look
         * up the pages elsewhere.
         */
        lastvecindex = -1;
        for (current = entry;
            (current != &map->header) && (current->start < end);
            current = current->next) {

                /*
                 * ignore submaps (for now) or null objects
                 */
                if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
                    current->object.vm_object == NULL)
                        continue;

                /*
                 * limit this scan to the current map entry and the
                 * limits for the mincore call
                 */
                if (addr < current->start)
                        addr = current->start;
                cend = current->end;
                if (cend > end)
                        cend = end;

                /*
                 * scan this entry one page at a time
                 */
                while (addr < cend) {
                        /*
                         * Check pmap first, it is likely faster, also
                         * it can provide info as to whether we are the
                         * one referencing or modifying the page.
                         */
                        mincoreinfo = pmap_mincore(pmap, addr);
                        if (!mincoreinfo) {
                                vm_pindex_t pindex;
                                vm_ooffset_t offset;
                                vm_page_t m;
                                /*
                                 * calculate the page index into the object
                                 */
                                offset = current->offset + (addr - current->start);
                                pindex = OFF_TO_IDX(offset);
                                m = vm_page_lookup(current->object.vm_object,
                                    pindex);
                                /*
                                 * if the page is resident, then gather
                                 * information about it.
                                 */
                                if (m) {
                                        mincoreinfo = MINCORE_INCORE;
                                        if (m->dirty ||
                                            pmap_is_modified(m))
                                                mincoreinfo |= MINCORE_MODIFIED_OTHER;
                                        if ((m->flags & PG_REFERENCED) ||
                                            pmap_ts_referenced(m)) {
                                                vm_page_flag_set(m, PG_REFERENCED);
                                                mincoreinfo |= MINCORE_REFERENCED_OTHER;
                                        }
                                }
                        }

                        /*
                         * subyte may page fault.  In case it needs to modify
                         * the map, we release the lock.
                         */
                        vm_map_unlock_read(map);

                        /*
                         * calculate index into user supplied byte vector
                         */
                        vecindex = OFF_TO_IDX(addr - first_addr);

                        /*
                         * If we have skipped map entries, we need to make sure that
                         * the byte vector is zeroed for those skipped entries.
                         */
                        while ((lastvecindex + 1) < vecindex) {
                                ++lastvecindex;
                                error = subyte(vec + lastvecindex, 0);
                                if (error) {
                                        return (EFAULT);
                                }
                        }

                        /*
                         * Pass the page information to the user
                         */
                        error = subyte(vec + vecindex, mincoreinfo);
                        if (error) {
                                return (EFAULT);
                        }

                        /*
                         * If the map has changed, due to the subyte, the previous
                         * output may be invalid.
                         */
                        vm_map_lock_read(map);
                        if (timestamp != map->timestamp)
                                goto RestartScan;

                        lastvecindex = vecindex;
                        addr += PAGE_SIZE;
                }
        }

        /*
         * subyte may page fault.  In case it needs to modify
         * the map, we release the lock.
         */
        vm_map_unlock_read(map);

        /*
         * Zero the last entries in the byte vector.
         */
        vecindex = OFF_TO_IDX(end - first_addr);
        while ((lastvecindex + 1) < vecindex) {
                ++lastvecindex;
                error = subyte(vec + lastvecindex, 0);
                if (error) {
                        return (EFAULT);
                }
        }

        /*
         * If the map has changed, due to the subyte, the previous
         * output may be invalid.
         */
        vm_map_lock_read(map);
        if (timestamp != map->timestamp)
                goto RestartScan;
        vm_map_unlock_read(map);

        return (0);
}
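
/*
 * Illustrative example (not from the original source): for
 * mincore(base, 3 * PAGE_SIZE, vec) the scan above produces one status
 * byte per page, so vec[0..2] each receive 0 (not resident) or a
 * combination of MINCORE_INCORE, MINCORE_MODIFIED_OTHER and
 * MINCORE_REFERENCED_OTHER; vecindex is simply (addr - first_addr)
 * expressed in pages, via OFF_TO_IDX().
 */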

/*
 * mlock_args(const void *addr, size_t len)
 */
int
mlock(struct mlock_args *uap)
{
        vm_offset_t addr;
        vm_size_t size, pageoff;
        int error;
        struct proc *p = curproc;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);

        /* disable wrap around */
        if (addr + size < addr)
                return (EINVAL);

        if (atop(size) + vmstats.v_wire_count > vm_page_max_wired)
                return (EAGAIN);

#ifdef pmap_wired_count
        if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
            p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
                return (ENOMEM);
#else
        error = suser_cred(p->p_ucred, 0);
        if (error)
                return (error);
#endif

        error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
        return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
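
/*
 * Illustrative arithmetic (not from the original source): with 4K pages,
 * mlock(base, 1024 * 1024) tries to wire atop(1MB) = 256 pages.  The call
 * fails with EAGAIN if that would push the global wire count past
 * vm_page_max_wired, and with ENOMEM if the process would exceed its
 * RLIMIT_MEMLOCK resource limit.
 */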

/*
 * mlockall_args(int how)
 */
int
mlockall(struct mlockall_args *uap)
{
        return (0);
}

/*
 * munlockall_args(int how)
 */
int
munlockall(struct munlockall_args *uap)
{
        return (0);
}

/*
 * munlock_args(const void *addr, size_t len)
 */
int
munlock(struct munlock_args *uap)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        int error;

        addr = (vm_offset_t) uap->addr;
        size = uap->len;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);

        /* disable wrap around */
        if (addr + size < addr)
                return (EINVAL);

#ifndef pmap_wired_count
        error = suser(td);
        if (error)
                return (error);
#endif

        error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
        return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
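
/*
 * Illustrative internal call (not from the original source): a kernel
 * caller mapping a vnode shared and pageable, much as SysV shared memory
 * does, would look roughly like
 *
 *	error = vm_mmap(&vms->vm_map, &attach_addr, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
 *	    MAP_SHARED, (void *)vp, 0);
 *
 * where a NULL handle with MAP_ANON would request anonymous memory instead.
 */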

int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
        boolean_t fitit;
        vm_object_t object;
        struct vnode *vp = NULL;
        objtype_t type;
        int rv = KERN_SUCCESS;
        vm_ooffset_t objsize;
        int docow;
        struct thread *td = curthread;  /* XXX */
        struct proc *p = td->td_proc;

        KKASSERT(p);

        if (size == 0)
                return (0);

        objsize = size = round_page(size);

        if (p->p_vmspace->vm_map.size + size >
            p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
                return (ENOMEM);
        }

        /*
         * We currently can only deal with page aligned file offsets.
         * The check is here rather than in the syscall because the
         * kernel calls this function internally for other mmapping
         * operations (such as in exec) and non-aligned offsets will
         * cause pmap inconsistencies...so we want to be sure to
         * disallow this in all cases.
         */
        if (foff & PAGE_MASK)
                return (EINVAL);

        if ((flags & MAP_FIXED) == 0) {
                fitit = TRUE;
                *addr = round_page(*addr);
        } else {
                if (*addr != trunc_page(*addr))
                        return (EINVAL);
                fitit = FALSE;
                (void) vm_map_remove(map, *addr, *addr + size);
        }

        /*
         * Lookup/allocate object.
         */
        if (flags & MAP_ANON) {
                type = OBJT_DEFAULT;
                /*
                 * Unnamed anonymous regions always start at 0.
                 */
                if (handle == 0)
                        foff = 0;
        } else {
                vp = (struct vnode *) handle;
                if (vp->v_type == VCHR) {
                        type = OBJT_DEVICE;
                        handle = (void *)(intptr_t)vp->v_rdev;
                } else {
                        struct vattr vat;
                        int error;

                        error = VOP_GETATTR(vp, &vat, td);
                        if (error)
                                return (error);
                        objsize = round_page(vat.va_size);
                        type = OBJT_VNODE;
                        /*
                         * If it is a regular file without any references
                         * we do not need to sync it.
                         */
                        if (vp->v_type == VREG && vat.va_nlink == 0) {
                                flags |= MAP_NOSYNC;
                        }
                }
        }

        if (handle == NULL) {
                object = NULL;
                docow = 0;
        } else {
                object = vm_pager_allocate(type,
                    handle, objsize, prot, foff);
                if (object == NULL)
                        return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
                docow = MAP_PREFAULT_PARTIAL;
        }

        /*
         * Force device mappings to be shared.
         */
        if (type == OBJT_DEVICE || type == OBJT_PHYS) {
                flags &= ~(MAP_PRIVATE|MAP_COPY);
                flags |= MAP_SHARED;
        }

        if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
                docow |= MAP_COPY_ON_WRITE;
        if (flags & MAP_NOSYNC)
                docow |= MAP_DISABLE_SYNCER;
        if (flags & MAP_NOCORE)
                docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
        if (prot & VM_PROT_READ)
                prot |= VM_PROT_EXECUTE;

        if (maxprot & VM_PROT_READ)
                maxprot |= VM_PROT_EXECUTE;
#endif

        if (fitit) {
                *addr = pmap_addr_hint(object, *addr, size);
        }

        if (flags & MAP_STACK)
                rv = vm_map_stack(map, *addr, size, prot,
                    maxprot, docow);
        else
                rv = vm_map_find(map, object, foff, addr, size, fitit,
                    prot, maxprot, docow);

        if (rv != KERN_SUCCESS) {
                /*
                 * Lose the object reference.  Will destroy the
                 * object if it's an unnamed anonymous mapping
                 * or named anonymous without other references.
                 */
                vm_object_deallocate(object);
                goto out;
        }

        /*
         * Shared memory is also shared with children.
         */
        if (flags & (MAP_SHARED|MAP_INHERIT)) {
                rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
                if (rv != KERN_SUCCESS) {
                        (void) vm_map_remove(map, *addr, *addr + size);
                        goto out;
                }
        }
out:
        switch (rv) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}