/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.27 (Berkeley) 04/05/93
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

struct sbrk_args {
	int	incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct sbrk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct sstk_args {
	int	incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct sstk_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mmap_args {
	caddr_t	addr;
	size_t	len;
	int	prot;
	int	flags;
	int	fd;
	long	pad;
	off_t	pos;
};

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
struct getpagesize_args {
	int	dummy;
};
/* ARGSUSED */
int
ogetpagesize(p, uap, retval)
	struct proc *p;
	struct getpagesize_args *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

#ifdef COMPAT_43
struct osmmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	flags;
	int	fd;
	long	pos;
};
int
osmmap(p, uap, retval)
	struct proc *p;
	register struct osmmap_args *uap;
	int *retval;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC|PROT_WRITE,
		PROT_READ,
		PROT_EXEC|PROT_READ,
		PROT_WRITE|PROT_READ,
		PROT_EXEC|PROT_WRITE|PROT_READ,
	};
#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot&0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (smmap(p, &nargs, retval));
}
#endif

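/*
 * Worked example of the conversion above (a sketch, read straight
 * off cvtbsdprot[] and the OMAP_* defines): an old-style prot of
 * 0x5 indexes entry 5 and becomes PROT_EXEC|PROT_READ; old flags
 * OMAP_ANON|OMAP_FIXED (0x0102) become MAP_ANON|MAP_FIXED plus
 * MAP_PRIVATE, since the absence of OMAP_SHARED selects MAP_PRIVATE.
 */
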
int
smmap(p, uap, retval)
	struct proc *p;
	register struct mmap_args *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot, maxprot;
	caddr_t handle;
	int flags, error;

	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, prot,
		       flags, uap->fd, (vm_offset_t)uap->pos);
#endif
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) ||
	    (ssize_t)uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (addr > addr + size)
			return (EINVAL);
	}
	/*
	 * XXX if no hint is provided for a non-fixed mapping, place it
	 * after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation.
		 * Obtain vnode and make sure it is of appropriate type.
		 */
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * Ensure that file and memory protections are compatible.
		 * Note that we only worry about writability if mapping is
		 * shared; in this case, current and max prot are dictated
		 * by the open file.
		 * XXX use the vnode instead?  Problem is: what credentials
		 * do we use for determination?  What if proc does a setuid?
		 */
		maxprot = VM_PROT_EXECUTE;	/* ??? */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ)
			return (EACCES);
		if (flags & MAP_SHARED) {
			if (fp->f_flag & FWRITE)
				maxprot |= VM_PROT_WRITE;
			else if (prot & PROT_WRITE)
				return (EACCES);
		} else
			maxprot |= VM_PROT_WRITE;
		handle = (caddr_t)vp;
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int)addr;
	return (error);
}

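/*
 * Usage sketch (illustrative user code, not part of this file; the
 * file name is hypothetical).  The maxprot computation above means a
 * descriptor opened O_RDONLY can be mapped MAP_SHARED only without
 * PROT_WRITE, while a MAP_PRIVATE mapping may be writable since the
 * writes never reach the file:
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *
 *	mmap(0, len, PROT_READ, MAP_SHARED, fd, (off_t)0);	ok
 *	mmap(0, len, PROT_READ|PROT_WRITE, MAP_SHARED,
 *	    fd, (off_t)0);				fails, EACCES
 *	mmap(0, len, PROT_READ|PROT_WRITE, MAP_PRIVATE,
 *	    fd, (off_t)0);					ok
 */
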
struct msync_args {
	caddr_t	addr;
	int	len;
};
int
msync(p, uap, retval)
	struct proc *p;
	struct msync_args *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return (EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
	    &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not vnode-backed.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return (EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return (0);
}

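/*
 * Note on the len argument (follows from the osize handling above):
 * a length of 0 makes osize default to the region size returned by
 * vm_region(), so the sync covers from addr to the end of the
 * containing region.  Illustrative calls, using this msync's
 * two-argument form:
 *
 *	msync(a, 0);	flush from a to the end of its region
 *	msync(a, len);	flush exactly [a, a+len)
 */
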
struct munmap_args {
	caddr_t	addr;
	int	len;
};
int
munmap(p, uap, retval)
	register struct proc *p;
	register struct munmap_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + size,
	    FALSE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return (0);
}

void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t	addr;
	int	len;
	int	prot;
};
int
mprotect(p, uap, retval)
	struct proc *p;
	struct mprotect_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t)uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t)uap->len;
	prot = uap->prot & VM_PROT_ALL;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

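/*
 * Sketch of the mprotect error mapping above (illustrative user
 * code): dropping rights always succeeds, but raising them beyond
 * the maximum protection recorded when the region was mapped fails:
 *
 *	mprotect(a, len, PROT_READ);			ok
 *	mprotect(a, len, PROT_READ|PROT_WRITE);		EACCES if the
 *			mapping's maxprot lacks VM_PROT_WRITE
 */
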
struct madvise_args {
	caddr_t	addr;
	int	len;
	int	behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct madvise_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct mincore_args {
	caddr_t	addr;
	int	len;
	char	*vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct mincore_args *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot, maxprot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void)vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if (flags & MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if (flags & MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
		    pager, foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 * Note that object will be NULL when handle == NULL,
		 * this is ok since vm_allocate_with_pager has made
		 * sure that these objects are uncached.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be a mapped file.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
		    pager, foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
			    fitit, pager, foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
			    addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
			    VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
			    TRUE, pager, foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
			    FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
				    &tentry, &tobject, &toffset,
				    &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, foff, foff + size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * If maxprot differs from prot, we must set both explicitly.
	 */
	rv = KERN_SUCCESS;
	if (maxprot != VM_PROT_ALL)
		rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE);
	if (rv == KERN_SUCCESS && prot != maxprot)
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
	if (rv != KERN_SUCCESS) {
		(void) vm_deallocate(map, *addr, size);
		goto out;
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

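/*
 * Sketch of an in-kernel caller (illustrative only; per the header
 * comment above, exec and SysV shared memory use this interface).
 * An anonymous, kernel-placed region of a given size would be mapped
 * roughly as:
 *
 *	vm_offset_t va = 0;
 *	int error;
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &va, size,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ANON, (caddr_t)NULL,
 *	    (vm_offset_t)0);
 *
 * On success va holds the chosen address; the KERN_* status has
 * already been folded into an errno by the switch above.
 */
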
/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register vm_map_entry_t entry;
	register vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return (KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return (KERN_SUCCESS);
}

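/*
 * Note for callers (cf. msync above): addr is IN/OUT.  If *addr
 * falls in an unallocated hole, it is advanced to the start of the
 * next map entry, which is why msync re-biases its object offset
 * with (oaddr - addr) after this call returns.
 */
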
/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Lookup the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ... For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return (result);
}

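/*
 * Usage note: vm_mmap above passes internal == TRUE only for the
 * anonymous-memory case; file and device mappings come through with
 * internal == FALSE, so their objects are entered into (or found in)
 * the pager/object cache per the Hibler comment above.
 */
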
/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}

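/*
 * End-to-end usage sketch (illustrative user code only; the path and
 * length are hypothetical).  This exercises the system calls
 * implemented above: map a file shared, dirty it, push the changes
 * back with msync, and tear the mapping down:
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/data", O_RDWR);
 *	size_t len = 8192;
 *	caddr_t a = mmap(0, len, PROT_READ|PROT_WRITE, MAP_SHARED,
 *	    fd, (off_t)0);
 *
 *	a[0] = 1;		modify the mapped file
 *	msync(a, len);		write modified pages back (two-argument
 *				form, matching msync_args above)
 *	munmap(a, len);
 *	close(fd);
 */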