/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.6 (Berkeley) 07/30/91
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "param.h"
#include "systm.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "vm.h"
#include "vm_pager.h"
#include "vm_prot.h"
#include "vm_statistics.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}
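/*
 * The PROT_* to VM_PROT_* translation below appears verbatim in both
 * smmap() and mprotect().  A shared helper along the following lines
 * would remove the duplication; this is a sketch only, and
 * prot_to_vmprot() is not part of these sources.
 */
#ifdef notdef
static vm_prot_t
prot_to_vmprot(uprot)
	int uprot;
{
	register vm_prot_t prot = VM_PROT_NONE;

	/* collapse PROT_* request bits into a Mach-style vm_prot_t */
	if (uprot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uprot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uprot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}
#endif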
smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x prot %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return (EINVAL);
	}
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((uap->flags & MAP_FIXED) && (addr & page_mask)) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * XXX if no hint is provided for a non-fixed mapping, place it
	 * after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * If mapping a file or named anonymous memory, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if the mapping is shared.
		 */
		if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return (EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return (error);
}
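/*
 * Illustrative only: a typical user-level call that lands in smmap()
 * above, mapping the first two pages of an open file shared and
 * read/write (fd and the result variable here are hypothetical):
 *
 *	addr = mmap((caddr_t)0, 2 * getpagesize(), PROT_READ|PROT_WRITE,
 *	    MAP_FILE|MAP_SHARED, fd, (off_t)0);
 */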
msync(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || uap->len < 0)
		return (EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return (EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync non-vnode-backed objects.
	 */
	if (object->internal || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return (EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return (0);
}

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & page_mask) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return (0);
}

munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & page_mask) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
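/*
 * Illustrative only: revoking write permission on one page of an
 * existing mapping (addr is hypothetical) would be
 *
 *	mprotect(addr, getpagesize(), PROT_READ);
 *
 * which reaches vm_map_protect(..., VM_PROT_READ, FALSE) above.
 */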
/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and System V shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Look up/allocate the pager.  All except an unnamed anonymous
	 * lookup gain a reference to ensure the continued existence of
	 * the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
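	/*
	 * Illustrative only: an anonymous mapping such as
	 *
	 *	addr = mmap((caddr_t)0, len, PROT_READ|PROT_WRITE,
	 *	    MAP_ANON|MAP_SHARED, -1, (off_t)0);
	 *
	 * takes the branch above with handle == NULL and type PG_DFLT.
	 */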
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring that the
		 * object will continue to exist.  If the call failed,
		 * the deallocate call below will terminate the object,
		 * which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW: you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object;
		 * changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we must guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and
			 * write-protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
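/*
 * Illustrative only: vm_mmap() above is reached from smmap() as
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
 *	    uap->flags, handle, (vm_offset_t)uap->pos);
 *
 * exec and System V shared memory use the same entry point.
 */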
/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t map;
	vm_offset_t *addr;		/* IN/OUT */
	vm_size_t *size;		/* OUT */
	vm_prot_t *prot;		/* OUT */
	vm_prot_t *max_prot;		/* OUT */
	vm_inherit_t *inheritance;	/* OUT */
	boolean_t *shared;		/* OUT */
	vm_object_t *object;		/* OUT */
	vm_offset_t *objoff;		/* OUT */
{
	vm_map_entry_t tmp_entry;
	register vm_map_entry_t entry;
	register vm_offset_t tmp_offset;
	vm_offset_t start;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return (KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object.vm_object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object.vm_object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return (KERN_SUCCESS);
}
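/*
 * Illustrative only: msync() above is the consumer of vm_region() in
 * this file, calling it as
 *
 *	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot,
 *	    &mprot, &inherit, &shared, &object, &objoff);
 *
 * and, on KERN_SUCCESS, receiving the object mapped at addr, locked.
 */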
/*
 * Yet another bastard routine.
 */
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t fitit;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 * Look up the pager/paging-space in the object cache.
	 * If it's not there, then create a new object and cache
	 * it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return (result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page-aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}
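/*
 * Illustrative only: given two contiguous map entries covering [A,B)
 * and [B,C), vm_map_is_allocated(map, A, C, FALSE) returns TRUE (the
 * range is covered by contiguous entries), while
 * vm_map_is_allocated(map, A, C, TRUE) returns FALSE (the range spans
 * more than one entry).
 */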