/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *      @(#)vm_mmap.c   7.23 (Berkeley) 03/09/93
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW      0x01
#define MDB_SYNC        0x02
#define MDB_MAPIT       0x04
#endif

struct sbrk_args {
        int     incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
        struct proc *p;
        struct sbrk_args *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

struct sstk_args {
        int     incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
        struct proc *p;
        struct sstk_args *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

struct mmap_args {
        caddr_t addr;
        int     len;
        int     prot;
        int     flags;
        int     fd;
        long    pad;
        off_t   pos;
};

#ifdef COMPAT_43
struct getpagesize_args {
        int     dummy;
};
/* ARGSUSED */
int
getpagesize(p, uap, retval)
        struct proc *p;
        struct getpagesize_args *uap;
        int *retval;
{

        *retval = PAGE_SIZE;
        return (0);
}

struct osmmap_args {
        caddr_t addr;
        int     len;
        int     prot;
        int     flags;
        int     fd;
        long    pos;
};
int
osmmap(p, uap, retval)
        struct proc *p;
        register struct osmmap_args *uap;
        int *retval;
{
        struct mmap_args nargs;
        static const char cvtbsdprot[8] = {
                0,
                PROT_EXEC,
                PROT_WRITE,
                PROT_EXEC|PROT_WRITE,
                PROT_READ,
                PROT_EXEC|PROT_READ,
                PROT_WRITE|PROT_READ,
                PROT_EXEC|PROT_WRITE|PROT_READ,
        };
#define OMAP_ANON       0x0002
#define OMAP_COPY       0x0020
#define OMAP_SHARED     0x0010
#define OMAP_FIXED      0x0100
#define OMAP_INHERIT    0x0800

        nargs.addr = uap->addr;
        nargs.len = uap->len;
        nargs.prot = cvtbsdprot[uap->prot&0x7];
        nargs.flags = 0;
        if (uap->flags & OMAP_ANON)
                nargs.flags |= MAP_ANON;
        if (uap->flags & OMAP_COPY)
                nargs.flags |= MAP_COPY;
        if (uap->flags & OMAP_SHARED)
                nargs.flags |= MAP_SHARED;
        else
                nargs.flags |= MAP_PRIVATE;
        if (uap->flags & OMAP_FIXED)
                nargs.flags |= MAP_FIXED;
        if (uap->flags & OMAP_INHERIT)
                nargs.flags |= MAP_INHERIT;
        nargs.fd = uap->fd;
        nargs.pos = uap->pos;
        return (smmap(p, &nargs, retval));
}
#endif
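
/*
 * Illustration (not part of the original interface): the cvtbsdprot
 * table above maps the old 4.3BSD protection encoding (exec = 1,
 * write = 2, read = 4) onto the current PROT_* values by simple
 * indexing.  For a hypothetical old-style request of 6 (read|write):
 *
 *      cvtbsdprot[6] == PROT_WRITE|PROT_READ
 *
 * so no bit-by-bit translation code is needed.
 */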

int
smmap(p, uap, retval)
        struct proc *p;
        register struct mmap_args *uap;
        int *retval;
{
        register struct filedesc *fdp = p->p_fd;
        register struct file *fp;
        struct vnode *vp;
        vm_offset_t addr;
        vm_size_t size;
        vm_prot_t prot, maxprot;
        caddr_t handle;
        int flags, error;

        flags = uap->flags;
#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
                    p->p_pid, uap->addr, uap->len, uap->prot,
                    flags, uap->fd, uap->pos);
#endif
        /*
         * Address (if FIXED) must be page aligned.
         * Size is implicitly rounded to a page boundary.
         */
        addr = (vm_offset_t) uap->addr;
        if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0 ||
            ((flags & MAP_ANON) && uap->fd != -1))
                return (EINVAL);
        size = (vm_size_t) round_page(uap->len);
        /*
         * Check for illegal addresses.  Watch out for address wrap...
         * Note that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (flags & MAP_FIXED) {
                if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
                        return (EINVAL);
                if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
                        return (EINVAL);
                if (addr > addr + size)
                        return (EINVAL);
        }
        /*
         * XXX: if no hint is provided for a non-fixed mapping, place it
         * after the end of the largest possible heap.
         *
         * There should really be a pmap call to determine a reasonable
         * location.
         */
        if (addr == 0 && (flags & MAP_FIXED) == 0)
                addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
        /*
         * If we are mapping a file we need to check various
         * file/vnode related things.
         */
        if (flags & MAP_ANON) {
                handle = NULL;
                maxprot = VM_PROT_ALL;
        } else {
                /*
                 * Mapping a file: get fp for validation.
                 * Obtain the vnode and make sure it is of appropriate type.
                 */
                if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
                    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
                        return (EBADF);
                if (fp->f_type != DTYPE_VNODE)
                        return (EINVAL);
                vp = (struct vnode *)fp->f_data;
                if (vp->v_type != VREG && vp->v_type != VCHR)
                        return (EINVAL);
                /*
                 * Ensure that file protection and desired protection
                 * are compatible.  Note that we only worry about writability
                 * if the mapping is shared.
                 */
                if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
                    ((flags & MAP_SHARED) &&
                     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
                        return (EACCES);
                handle = (caddr_t)vp;
                /*
                 * Set maximum protection as dictated by the open file.
                 * XXX use the vnode instead?  Problem is: what credentials
                 * do we use for determination?  What if proc does a setuid?
                 */
                maxprot = 0;
                if (fp->f_flag & FREAD)
                        maxprot |= VM_PROT_READ|VM_PROT_EXECUTE;
                if (fp->f_flag & FWRITE)
                        maxprot |= VM_PROT_WRITE;
        }
        prot = uap->prot & VM_PROT_ALL;
        error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
            flags, handle, (vm_offset_t)uap->pos);
        if (error == 0)
                *retval = (int) addr;
        return (error);
}
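
/*
 * Example (hypothetical user-level call, for illustration only):
 *
 *      char *p = mmap((caddr_t)0, len, PROT_READ|PROT_WRITE,
 *          MAP_SHARED, fd, (off_t)0);
 *
 * Because MAP_SHARED and PROT_WRITE are both requested, the access
 * check above demands a descriptor opened for writing (FWRITE);
 * otherwise smmap() returns EACCES.  With no hint and no MAP_FIXED,
 * the mapping is placed beyond vm_daddr + MAXDSIZ.
 */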

struct msync_args {
        caddr_t addr;
        int     len;
};
int
msync(p, uap, retval)
        struct proc *p;
        struct msync_args *uap;
        int *retval;
{
        vm_offset_t addr, objoff, oaddr;
        vm_size_t size, osize;
        vm_prot_t prot, mprot;
        vm_inherit_t inherit;
        vm_object_t object;
        boolean_t shared;
        int rv;

#ifdef DEBUG
        if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
                printf("msync(%d): addr %x len %x\n",
                    p->p_pid, uap->addr, uap->len);
#endif
        if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
                return (EINVAL);
        addr = oaddr = (vm_offset_t)uap->addr;
        osize = (vm_size_t)uap->len;
        /*
         * Region must be entirely contained in a single entry.
         */
        if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + osize,
            TRUE))
                return (EINVAL);
        /*
         * Determine the object associated with that entry
         * (the object is returned locked on KERN_SUCCESS).
         */
        rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
            &inherit, &shared, &object, &objoff);
        if (rv != KERN_SUCCESS)
                return (EINVAL);
#ifdef DEBUG
        if (mmapdebug & MDB_SYNC)
                printf("msync: region: object %x addr %x size %d objoff %d\n",
                    object, addr, size, objoff);
#endif
        /*
         * Do not msync non-vnode-backed objects.
         */
        if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
            object->pager->pg_type != PG_VNODE) {
                vm_object_unlock(object);
                return (EINVAL);
        }
        objoff += oaddr - addr;
        if (osize == 0)
                osize = size;
#ifdef DEBUG
        if (mmapdebug & MDB_SYNC)
                printf("msync: cleaning/flushing object range [%x-%x)\n",
                    objoff, objoff+osize);
#endif
        if (prot & VM_PROT_WRITE)
                vm_object_page_clean(object, objoff, objoff+osize, FALSE);
        /*
         * (XXX)
         * Bummer, gotta flush all cached pages to ensure
         * consistency with the file system cache.
         */
        vm_object_page_remove(object, objoff, objoff+osize);
        vm_object_unlock(object);
        return (0);
}
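
/*
 * Note on the len == 0 case (illustration only): vm_region() returns
 * in `size' the extent from addr to the end of the containing entry,
 * so a hypothetical user-level call
 *
 *      msync(addr, 0);
 *
 * cleans and flushes from addr through the end of that map entry.
 * With a nonzero len the caller's byte count is used instead.
 */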

struct munmap_args {
        caddr_t addr;
        int     len;
};
int
munmap(p, uap, retval)
        register struct proc *p;
        register struct munmap_args *uap;
        int *retval;
{
        vm_offset_t addr;
        vm_size_t size;

#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("munmap(%d): addr %x len %x\n",
                    p->p_pid, uap->addr, uap->len);
#endif

        addr = (vm_offset_t) uap->addr;
        if ((addr & PAGE_MASK) || uap->len < 0)
                return (EINVAL);
        size = (vm_size_t) round_page(uap->len);
        if (size == 0)
                return (0);
        /*
         * Check for illegal addresses.  Watch out for address wrap...
         * Note that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS)
                return (EINVAL);
        if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
                return (EINVAL);
        if (addr > addr + size)
                return (EINVAL);
        if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + size,
            FALSE))
                return (EINVAL);
        /* returns nothing but KERN_SUCCESS anyway */
        (void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
        return (0);
}

void
munmapfd(fd)
        int fd;
{
#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

        /*
         * XXX -- should vm_deallocate any regions mapped to this file
         */
        curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
        caddr_t addr;
        int     len;
        int     prot;
};
int
mprotect(p, uap, retval)
        struct proc *p;
        struct mprotect_args *uap;
        int *retval;
{
        vm_offset_t addr;
        vm_size_t size;
        register vm_prot_t prot;

#ifdef DEBUG
        if (mmapdebug & MDB_FOLLOW)
                printf("mprotect(%d): addr %x len %x prot %d\n",
                    p->p_pid, uap->addr, uap->len, uap->prot);
#endif

        addr = (vm_offset_t)uap->addr;
        if ((addr & PAGE_MASK) || uap->len < 0)
                return (EINVAL);
        size = (vm_size_t)uap->len;
        prot = uap->prot & VM_PROT_ALL;

        switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
            FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        }
        return (EINVAL);
}
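
/*
 * Example (hypothetical user-level call): downgrading a writable
 * region with
 *
 *      mprotect(addr, len, PROT_READ);
 *
 * succeeds and write-protects the range.  Requesting protection
 * beyond the entry's maximum (say PROT_WRITE on a region whose
 * max_protection lacks write) makes vm_map_protect() return
 * KERN_PROTECTION_FAILURE, which is reported as EACCES above.
 */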

struct madvise_args {
        caddr_t addr;
        int     len;
        int     behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
        struct proc *p;
        struct madvise_args *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

struct mincore_args {
        caddr_t addr;
        int     len;
        char    *vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
        struct proc *p;
        struct mincore_args *uap;
        int *retval;
{

        /* Not yet implemented */
        return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
        register vm_map_t map;
        register vm_offset_t *addr;
        register vm_size_t size;
        vm_prot_t prot, maxprot;
        register int flags;
        caddr_t handle;         /* XXX should be vp */
        vm_offset_t foff;
{
        register vm_pager_t pager;
        boolean_t fitit;
        vm_object_t object;
        struct vnode *vp;
        int type;
        int rv = KERN_SUCCESS;

        if (size == 0)
                return (0);

        if ((flags & MAP_FIXED) == 0) {
                fitit = TRUE;
                *addr = round_page(*addr);
        } else {
                fitit = FALSE;
                (void)vm_deallocate(map, *addr, size);
        }

        /*
         * Lookup/allocate pager.  All except an unnamed anonymous lookup
         * gain a reference to ensure continued existence of the object.
         * (XXX the exception is to appease the pageout daemon)
         */
        if (flags & MAP_ANON)
                type = PG_DFLT;
        else {
                vp = (struct vnode *)handle;
                if (vp->v_type == VCHR) {
                        type = PG_DEVICE;
                        handle = (caddr_t)vp->v_rdev;
                } else
                        type = PG_VNODE;
        }
        pager = vm_pager_allocate(type, handle, size, prot);
        if (pager == NULL)
                return (type == PG_DEVICE ? EINVAL : ENOMEM);
        /*
         * Find the object and release the extra reference gained by lookup.
         */
        object = vm_object_lookup(pager);
        vm_object_deallocate(object);

        /*
         * Anonymous memory.
         */
        if (flags & MAP_ANON) {
                rv = vm_allocate_with_pager(map, addr, size, fitit,
                    pager, (vm_offset_t)foff, TRUE);
                if (rv != KERN_SUCCESS) {
                        if (handle == NULL)
                                vm_pager_deallocate(pager);
                        else
                                vm_object_deallocate(object);
                        goto out;
                }
                /*
                 * Don't cache anonymous objects.
                 * Loses the reference gained by vm_pager_allocate.
                 * Note that object will be NULL when handle == NULL;
                 * this is ok since vm_allocate_with_pager has made
                 * sure that these objects are uncached.
                 */
                (void) pager_cache(object, FALSE);
#ifdef DEBUG
                if (mmapdebug & MDB_MAPIT)
                        printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
                            curproc->p_pid, *addr, size, pager);
#endif
        }
        /*
         * Must be a mapped file.
         * Distinguish between character special and regular files.
         */
        else if (vp->v_type == VCHR) {
                rv = vm_allocate_with_pager(map, addr, size, fitit,
                    pager, (vm_offset_t)foff, FALSE);
                /*
                 * Uncache the object and lose the reference gained
                 * by vm_pager_allocate().  If the call to
                 * vm_allocate_with_pager() was successful, then we
                 * gained an additional reference ensuring the object
                 * will continue to exist.  If the call failed then
                 * the deallocate call below will terminate the
                 * object, which is fine.
                 */
                (void) pager_cache(object, FALSE);
                if (rv != KERN_SUCCESS)
                        goto out;
        }
        /*
         * A regular file.
         */
        else {
#ifdef DEBUG
                if (object == NULL)
                        printf("vm_mmap: no object: vp %x, pager %x\n",
                            vp, pager);
#endif
                /*
                 * Map it directly.
                 * Allows modifications to go out to the vnode.
                 */
                if (flags & MAP_SHARED) {
                        rv = vm_allocate_with_pager(map, addr, size,
                            fitit, pager, (vm_offset_t)foff, FALSE);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                goto out;
                        }
                        /*
                         * Don't cache the object.  This is the easiest way
                         * of ensuring that data gets back to the filesystem
                         * because vnode_pager_deallocate() will fsync the
                         * vnode.  pager_cache() will lose the extra ref.
                         */
                        if (prot & VM_PROT_WRITE)
                                pager_cache(object, FALSE);
                        else
                                vm_object_deallocate(object);
                }
                /*
                 * Copy-on-write of file.  Two flavors.
                 * MAP_COPY is true COW, you essentially get a snapshot of
                 * the region at the time of mapping.  MAP_PRIVATE means only
                 * that your changes are not reflected back to the object.
                 * Changes made by others will be seen.
                 */
                else {
                        vm_map_t tmap;
                        vm_offset_t off;

                        /* locate and allocate the target address space */
                        rv = vm_map_find(map, NULL, (vm_offset_t)0,
                            addr, size, fitit);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                goto out;
                        }
                        tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
                            VM_MIN_ADDRESS+size, TRUE);
                        off = VM_MIN_ADDRESS;
                        rv = vm_allocate_with_pager(tmap, &off, size,
                            TRUE, pager, (vm_offset_t)foff, FALSE);
                        if (rv != KERN_SUCCESS) {
                                vm_object_deallocate(object);
                                vm_map_deallocate(tmap);
                                goto out;
                        }
                        /*
                         * (XXX)
                         * MAP_PRIVATE implies that we see changes made by
                         * others.  To ensure that, we need to guarantee that
                         * no copy object is created (otherwise original
                         * pages would be pushed to the copy object and we
                         * would never see changes made by others).  We
                         * totally sleaze it right now by marking the object
                         * internal temporarily.
                         */
                        if ((flags & MAP_COPY) == 0)
                                object->flags |= OBJ_INTERNAL;
                        rv = vm_map_copy(map, tmap, *addr, size, off,
                            FALSE, FALSE);
                        object->flags &= ~OBJ_INTERNAL;
                        /*
                         * (XXX)
                         * My oh my, this only gets worse...
                         * Force creation of a shadow object so that
                         * vm_map_fork will do the right thing.
                         */
                        if ((flags & MAP_COPY) == 0) {
                                vm_map_t tmap;
                                vm_map_entry_t tentry;
                                vm_object_t tobject;
                                vm_offset_t toffset;
                                vm_prot_t tprot;
                                boolean_t twired, tsu;

                                tmap = map;
                                vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
                                    &tentry, &tobject, &toffset,
                                    &tprot, &twired, &tsu);
                                vm_map_lookup_done(tmap, tentry);
                        }
                        /*
                         * (XXX)
                         * Map copy code cannot detect sharing unless a
                         * sharing map is involved.  So we cheat and write
                         * protect everything ourselves.
                         */
                        vm_object_pmap_copy(object, (vm_offset_t)foff,
                            (vm_offset_t)foff+size);
                        vm_object_deallocate(object);
                        vm_map_deallocate(tmap);
                        if (rv != KERN_SUCCESS)
                                goto out;
                }
#ifdef DEBUG
                if (mmapdebug & MDB_MAPIT)
                        printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
                            curproc->p_pid, *addr, size, pager);
#endif
        }
        /*
         * Correct protection (default is VM_PROT_ALL).
         * If maxprot is different from prot, we must set both explicitly.
         */
        rv = KERN_SUCCESS;
        if (maxprot != VM_PROT_ALL)
                rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE);
        if (rv == KERN_SUCCESS && prot != maxprot)
                rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
        if (rv != KERN_SUCCESS) {
                (void) vm_deallocate(map, *addr, size);
                goto out;
        }
        /*
         * Shared memory is also shared with children.
         */
        if (flags & MAP_SHARED) {
                rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
                if (rv != KERN_SUCCESS) {
                        (void) vm_deallocate(map, *addr, size);
                        goto out;
                }
        }
out:
#ifdef DEBUG
        if (mmapdebug & MDB_MAPIT)
                printf("vm_mmap: rv %d\n", rv);
#endif
        switch (rv) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
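
/*
 * Illustration of the protection fixup above (hypothetical values):
 * a private read-only file mapping arrives with prot == VM_PROT_READ
 * and maxprot == VM_PROT_READ|VM_PROT_EXECUTE.  Since maxprot is not
 * VM_PROT_ALL, the first vm_map_protect() call (set_max == TRUE)
 * clamps max_protection; since prot != maxprot, the second call then
 * sets the current protection to read-only.
 */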

/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given an address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
        vm_map_t map;
        vm_offset_t *addr;              /* IN/OUT */
        vm_size_t *size;                /* OUT */
        vm_prot_t *prot;                /* OUT */
        vm_prot_t *max_prot;            /* OUT */
        vm_inherit_t *inheritance;      /* OUT */
        boolean_t *shared;              /* OUT */
        vm_object_t *object;            /* OUT */
        vm_offset_t *objoff;            /* OUT */
{
        vm_map_entry_t tmp_entry;
        register vm_map_entry_t entry;
        register vm_offset_t tmp_offset;
        vm_offset_t start;

        if (map == NULL)
                return (KERN_INVALID_ARGUMENT);

        start = *addr;

        vm_map_lock_read(map);
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->next) == &map->header) {
                        vm_map_unlock_read(map);
                        return (KERN_NO_SPACE);
                }
                start = entry->start;
                *addr = start;
        } else
                entry = tmp_entry;

        *prot = entry->protection;
        *max_prot = entry->max_protection;
        *inheritance = entry->inheritance;

        tmp_offset = entry->offset + (start - entry->start);
        *size = (entry->end - start);

        if (entry->is_a_map) {
                register vm_map_t share_map;
                vm_size_t share_size;

                share_map = entry->object.share_map;

                vm_map_lock_read(share_map);
                (void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

                if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
                        *size = share_size;

                vm_object_lock(tmp_entry->object);
                *object = tmp_entry->object.vm_object;
                *objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

                *shared = (share_map->ref_count != 1);
                vm_map_unlock_read(share_map);
        } else {
                vm_object_lock(entry->object);
                *object = entry->object.vm_object;
                *objoff = tmp_offset;

                *shared = FALSE;
        }

        vm_map_unlock_read(map);

        return (KERN_SUCCESS);
}
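
/*
 * Example (hypothetical addresses): with entries [0x1000, 0x3000) and
 * [0x5000, 0x6000), a query at *addr == 0x4000 fails the initial
 * lookup, so vm_region() slides forward: *addr becomes 0x5000 and
 * *size becomes 0x1000.  A query above 0x6000 returns KERN_NO_SPACE.
 */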

/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
        register vm_map_t map;
        register vm_offset_t *addr;
        register vm_size_t size;
        boolean_t fitit;
        vm_pager_t pager;
        vm_offset_t poffset;
        boolean_t internal;
{
        register vm_object_t object;
        register int result;

        if (map == NULL)
                return (KERN_INVALID_ARGUMENT);

        *addr = trunc_page(*addr);
        size = round_page(size);

        /*
         * Lookup the pager/paging-space in the object cache.
         * If it's not there, then create a new object and cache
         * it.
         */
        object = vm_object_lookup(pager);
        cnt.v_lookups++;
        if (object == NULL) {
                object = vm_object_allocate(size);
                /*
                 * From Mike Hibler: "unnamed anonymous objects should never
                 * be on the hash list ...  For now you can just change
                 * vm_allocate_with_pager to not do vm_object_enter if this
                 * is an internal object ..."
                 */
                if (!internal)
                        vm_object_enter(object, pager);
        } else
                cnt.v_hits++;
        if (internal)
                object->flags |= OBJ_INTERNAL;
        else
                object->flags &= ~OBJ_INTERNAL;

        result = vm_map_find(map, object, poffset, addr, size, fitit);
        if (result != KERN_SUCCESS)
                vm_object_deallocate(object);
        else if (pager != NULL)
                vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
        return (result);
}
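
/*
 * Call sketch (illustration only): the MAP_ANON path in vm_mmap()
 * uses
 *
 *      vm_allocate_with_pager(map, &addr, size, fitit, pager, foff, TRUE);
 *
 * internal == TRUE keeps the unnamed anonymous object off the object
 * cache hash list (per the note above), while file and device
 * mappings pass FALSE so a later mmap of the same pager finds and
 * shares the cached object.
 */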

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
        vm_map_t map;
        vm_offset_t start, end;
        boolean_t single_entry;
{
        vm_map_entry_t mapent;
        register vm_offset_t nend;

        vm_map_lock_read(map);

        /*
         * Start address not in any entry.
         */
        if (!vm_map_lookup_entry(map, start, &mapent)) {
                vm_map_unlock_read(map);
                return (FALSE);
        }
        /*
         * Find the maximum stretch of contiguously allocated space.
         */
        nend = mapent->end;
        if (!single_entry) {
                mapent = mapent->next;
                while (mapent != &map->header && mapent->start == nend) {
                        nend = mapent->end;
                        mapent = mapent->next;
                }
        }

        vm_map_unlock_read(map);
        return (end <= nend);
}
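
/*
 * Example (hypothetical map layout): with two adjacent entries
 * [A, B) and [B, C), the call
 *
 *      vm_map_is_allocated(map, A, C, FALSE)
 *
 * returns TRUE because the entries are contiguous at B, while the
 * same call with single_entry == TRUE returns FALSE since no single
 * entry covers [A, C).
 */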