1808a4de1Smckusick /* 2808a4de1Smckusick * Copyright (c) 1988 University of Utah. 313c4e08cSbostic * Copyright (c) 1991, 1993 413c4e08cSbostic * The Regents of the University of California. All rights reserved. 5808a4de1Smckusick * 6808a4de1Smckusick * This code is derived from software contributed to Berkeley by 7808a4de1Smckusick * the Systems Programming Group of the University of Utah Computer 8808a4de1Smckusick * Science Department. 9808a4de1Smckusick * 10808a4de1Smckusick * %sccs.include.redist.c% 11808a4de1Smckusick * 12ff2bfc3fShibler * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$ 13808a4de1Smckusick * 14*d08ee885Shibler * @(#)vm_mmap.c 8.3 (Berkeley) 11/14/93 15808a4de1Smckusick */ 16808a4de1Smckusick 17808a4de1Smckusick /* 18808a4de1Smckusick * Mapped file (mmap) interface to VM 19808a4de1Smckusick */ 20808a4de1Smckusick 2164f61df8Sbostic #include <sys/param.h> 2264f61df8Sbostic #include <sys/systm.h> 2364f61df8Sbostic #include <sys/filedesc.h> 2480376accShibler #include <sys/resourcevar.h> 2564f61df8Sbostic #include <sys/proc.h> 2664f61df8Sbostic #include <sys/vnode.h> 2764f61df8Sbostic #include <sys/file.h> 2864f61df8Sbostic #include <sys/mman.h> 2964f61df8Sbostic #include <sys/conf.h> 30808a4de1Smckusick 314938844dSpendry #include <miscfs/specfs/specdev.h> 324938844dSpendry 3364f61df8Sbostic #include <vm/vm.h> 3464f61df8Sbostic #include <vm/vm_pager.h> 3564f61df8Sbostic #include <vm/vm_prot.h> 36808a4de1Smckusick 37808a4de1Smckusick #ifdef DEBUG 38808a4de1Smckusick int mmapdebug = 0; 39808a4de1Smckusick #define MDB_FOLLOW 0x01 40808a4de1Smckusick #define MDB_SYNC 0x02 41808a4de1Smckusick #define MDB_MAPIT 0x04 42808a4de1Smckusick #endif 43808a4de1Smckusick 44499c7c41Storek struct sbrk_args { 45499c7c41Storek int incr; 46499c7c41Storek }; 47808a4de1Smckusick /* ARGSUSED */ 4864f61df8Sbostic int 49808a4de1Smckusick sbrk(p, uap, retval) 50808a4de1Smckusick struct proc *p; 51499c7c41Storek struct sbrk_args *uap; 52808a4de1Smckusick int *retval; 
53808a4de1Smckusick { 54808a4de1Smckusick 55808a4de1Smckusick /* Not yet implemented */ 56808a4de1Smckusick return (EOPNOTSUPP); 57808a4de1Smckusick } 58808a4de1Smckusick 59499c7c41Storek struct sstk_args { 60499c7c41Storek int incr; 61499c7c41Storek }; 62808a4de1Smckusick /* ARGSUSED */ 6364f61df8Sbostic int 64808a4de1Smckusick sstk(p, uap, retval) 65808a4de1Smckusick struct proc *p; 66499c7c41Storek struct sstk_args *uap; 67808a4de1Smckusick int *retval; 68808a4de1Smckusick { 69808a4de1Smckusick 70808a4de1Smckusick /* Not yet implemented */ 71808a4de1Smckusick return (EOPNOTSUPP); 72808a4de1Smckusick } 73808a4de1Smckusick 746ad25c8eStorek #if defined(COMPAT_43) || defined(COMPAT_SUNOS) 754fc8e28eSmckusick struct getpagesize_args { 764fc8e28eSmckusick int dummy; 774fc8e28eSmckusick }; 784fc8e28eSmckusick /* ARGSUSED */ 794fc8e28eSmckusick int 80f68f3b2eSmckusick ogetpagesize(p, uap, retval) 814fc8e28eSmckusick struct proc *p; 824fc8e28eSmckusick struct getpagesize_args *uap; 834fc8e28eSmckusick int *retval; 844fc8e28eSmckusick { 854fc8e28eSmckusick 864fc8e28eSmckusick *retval = PAGE_SIZE; 874fc8e28eSmckusick return (0); 884fc8e28eSmckusick } 896ad25c8eStorek #endif /* COMPAT_43 || COMPAT_SUNOS */ 904fc8e28eSmckusick 913d556935Sbostic struct mmap_args { 923d556935Sbostic caddr_t addr; 933d556935Sbostic size_t len; 943d556935Sbostic int prot; 953d556935Sbostic int flags; 963d556935Sbostic int fd; 973d556935Sbostic long pad; 983d556935Sbostic off_t pos; 993d556935Sbostic }; 1003d556935Sbostic 1016ad25c8eStorek #ifdef COMPAT_43 1023d556935Sbostic struct ommap_args { 103808a4de1Smckusick caddr_t addr; 104808a4de1Smckusick int len; 105808a4de1Smckusick int prot; 106808a4de1Smckusick int flags; 107808a4de1Smckusick int fd; 1082f455721Storek long pos; 109499c7c41Storek }; 110499c7c41Storek int 1113d556935Sbostic ommap(p, uap, retval) 112499c7c41Storek struct proc *p; 1133d556935Sbostic register struct ommap_args *uap; 114808a4de1Smckusick int *retval; 115808a4de1Smckusick 
{ 1169cf4a8a8Smckusick struct mmap_args nargs; 1179cf4a8a8Smckusick static const char cvtbsdprot[8] = { 1189cf4a8a8Smckusick 0, 1199cf4a8a8Smckusick PROT_EXEC, 1209cf4a8a8Smckusick PROT_WRITE, 1219cf4a8a8Smckusick PROT_EXEC|PROT_WRITE, 1229cf4a8a8Smckusick PROT_READ, 1239cf4a8a8Smckusick PROT_EXEC|PROT_READ, 1249cf4a8a8Smckusick PROT_WRITE|PROT_READ, 1259cf4a8a8Smckusick PROT_EXEC|PROT_WRITE|PROT_READ, 1269cf4a8a8Smckusick }; 1279cf4a8a8Smckusick #define OMAP_ANON 0x0002 1289cf4a8a8Smckusick #define OMAP_COPY 0x0020 1299cf4a8a8Smckusick #define OMAP_SHARED 0x0010 1309cf4a8a8Smckusick #define OMAP_FIXED 0x0100 1319cf4a8a8Smckusick #define OMAP_INHERIT 0x0800 1329cf4a8a8Smckusick 1339cf4a8a8Smckusick nargs.addr = uap->addr; 1349cf4a8a8Smckusick nargs.len = uap->len; 1359cf4a8a8Smckusick nargs.prot = cvtbsdprot[uap->prot&0x7]; 1369cf4a8a8Smckusick nargs.flags = 0; 1379cf4a8a8Smckusick if (uap->flags & OMAP_ANON) 1389cf4a8a8Smckusick nargs.flags |= MAP_ANON; 1399cf4a8a8Smckusick if (uap->flags & OMAP_COPY) 1409cf4a8a8Smckusick nargs.flags |= MAP_COPY; 1419cf4a8a8Smckusick if (uap->flags & OMAP_SHARED) 1429cf4a8a8Smckusick nargs.flags |= MAP_SHARED; 1439cf4a8a8Smckusick else 1449cf4a8a8Smckusick nargs.flags |= MAP_PRIVATE; 1459cf4a8a8Smckusick if (uap->flags & OMAP_FIXED) 1469cf4a8a8Smckusick nargs.flags |= MAP_FIXED; 1479cf4a8a8Smckusick if (uap->flags & OMAP_INHERIT) 1489cf4a8a8Smckusick nargs.flags |= MAP_INHERIT; 1499cf4a8a8Smckusick nargs.fd = uap->fd; 1509cf4a8a8Smckusick nargs.pos = uap->pos; 1513d556935Sbostic return (mmap(p, &nargs, retval)); 1529cf4a8a8Smckusick } 1539cf4a8a8Smckusick #endif 1549cf4a8a8Smckusick 1559cf4a8a8Smckusick int 1563d556935Sbostic mmap(p, uap, retval) 1579cf4a8a8Smckusick struct proc *p; 1589cf4a8a8Smckusick register struct mmap_args *uap; 1599cf4a8a8Smckusick int *retval; 1609cf4a8a8Smckusick { 161b5a4ea96Smckusick register struct filedesc *fdp = p->p_fd; 162b5a4ea96Smckusick register struct file *fp; 163808a4de1Smckusick struct vnode 
*vp; 164808a4de1Smckusick vm_offset_t addr; 165808a4de1Smckusick vm_size_t size; 166525ac35aShibler vm_prot_t prot, maxprot; 167808a4de1Smckusick caddr_t handle; 1688c153b3cSmckusick int flags, error; 169808a4de1Smckusick 1709f95614cSmckusick prot = uap->prot & VM_PROT_ALL; 1718c153b3cSmckusick flags = uap->flags; 172808a4de1Smckusick #ifdef DEBUG 173808a4de1Smckusick if (mmapdebug & MDB_FOLLOW) 174808a4de1Smckusick printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n", 1759f95614cSmckusick p->p_pid, uap->addr, uap->len, prot, 176af75fa5dSralph flags, uap->fd, (vm_offset_t)uap->pos); 177808a4de1Smckusick #endif 178808a4de1Smckusick /* 1790c4a53bbShibler * Address (if FIXED) must be page aligned. 1800c4a53bbShibler * Size is implicitly rounded to a page boundary. 181808a4de1Smckusick */ 182808a4de1Smckusick addr = (vm_offset_t) uap->addr; 1839f95614cSmckusick if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) || 1849f95614cSmckusick (ssize_t)uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1)) 185808a4de1Smckusick return (EINVAL); 1860c4a53bbShibler size = (vm_size_t) round_page(uap->len); 1873e8d6014Smckusick /* 1883e8d6014Smckusick * Check for illegal addresses. Watch out for address wrap... 1893e8d6014Smckusick * Note that VM_*_ADDRESS are not constants due to casts (argh). 1903e8d6014Smckusick */ 1918c153b3cSmckusick if (flags & MAP_FIXED) { 1923e8d6014Smckusick if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS) 19349e1a72aSmckusick return (EINVAL); 1943e8d6014Smckusick if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS) 1953e8d6014Smckusick return (EINVAL); 1963e8d6014Smckusick if (addr > addr + size) 1973e8d6014Smckusick return (EINVAL); 1983e8d6014Smckusick } 1990c4a53bbShibler /* 2000c4a53bbShibler * XXX if no hint provided for a non-fixed mapping place it after 2010c4a53bbShibler * the end of the largest possible heap. 
2020c4a53bbShibler * 2030c4a53bbShibler * There should really be a pmap call to determine a reasonable 2040c4a53bbShibler * location. 2050c4a53bbShibler */ 2068c153b3cSmckusick if (addr == 0 && (flags & MAP_FIXED) == 0) 2070c4a53bbShibler addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ); 208525ac35aShibler if (flags & MAP_ANON) { 2099f95614cSmckusick /* 2109f95614cSmckusick * Mapping blank space is trivial. 2119f95614cSmckusick */ 2129cf4a8a8Smckusick handle = NULL; 213525ac35aShibler maxprot = VM_PROT_ALL; 214525ac35aShibler } else { 215808a4de1Smckusick /* 2169cf4a8a8Smckusick * Mapping file, get fp for validation. 2179f95614cSmckusick * Obtain vnode and make sure it is of appropriate type. 218808a4de1Smckusick */ 2199cf4a8a8Smckusick if (((unsigned)uap->fd) >= fdp->fd_nfiles || 2209cf4a8a8Smckusick (fp = fdp->fd_ofiles[uap->fd]) == NULL) 2219cf4a8a8Smckusick return (EBADF); 222808a4de1Smckusick if (fp->f_type != DTYPE_VNODE) 223808a4de1Smckusick return (EINVAL); 224808a4de1Smckusick vp = (struct vnode *)fp->f_data; 225808a4de1Smckusick if (vp->v_type != VREG && vp->v_type != VCHR) 226808a4de1Smckusick return (EINVAL); 227808a4de1Smckusick /* 228*d08ee885Shibler * XXX hack to handle use of /dev/zero to map anon 229*d08ee885Shibler * memory (ala SunOS). 230*d08ee885Shibler */ 231*d08ee885Shibler if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) { 232*d08ee885Shibler handle = NULL; 233*d08ee885Shibler maxprot = VM_PROT_ALL; 234*d08ee885Shibler flags |= MAP_ANON; 235*d08ee885Shibler } else { 236*d08ee885Shibler /* 237*d08ee885Shibler * Ensure that file and memory protections are 238*d08ee885Shibler * compatible. Note that we only worry about 239*d08ee885Shibler * writability if mapping is shared; in this case, 240*d08ee885Shibler * current and max prot are dictated by the open file. 241*d08ee885Shibler * XXX use the vnode instead? Problem is: what 242*d08ee885Shibler * credentials do we use for determination? 243*d08ee885Shibler * What if proc does a setuid? 
244808a4de1Smckusick */ 2459f95614cSmckusick maxprot = VM_PROT_EXECUTE; /* ??? */ 246525ac35aShibler if (fp->f_flag & FREAD) 2479f95614cSmckusick maxprot |= VM_PROT_READ; 2489f95614cSmckusick else if (prot & PROT_READ) 2499f95614cSmckusick return (EACCES); 2509f95614cSmckusick if (flags & MAP_SHARED) { 251525ac35aShibler if (fp->f_flag & FWRITE) 252525ac35aShibler maxprot |= VM_PROT_WRITE; 2539f95614cSmckusick else if (prot & PROT_WRITE) 2549f95614cSmckusick return (EACCES); 2559f95614cSmckusick } else 2569f95614cSmckusick maxprot |= VM_PROT_WRITE; 2579f95614cSmckusick handle = (caddr_t)vp; 258525ac35aShibler } 259*d08ee885Shibler } 260525ac35aShibler error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot, 2618c153b3cSmckusick flags, handle, (vm_offset_t)uap->pos); 262808a4de1Smckusick if (error == 0) 263808a4de1Smckusick *retval = (int)addr; 264808a4de1Smckusick return (error); 265808a4de1Smckusick } 266808a4de1Smckusick 267499c7c41Storek struct msync_args { 268499c7c41Storek caddr_t addr; 269499c7c41Storek int len; 270499c7c41Storek }; 27164f61df8Sbostic int 272808a4de1Smckusick msync(p, uap, retval) 273808a4de1Smckusick struct proc *p; 274499c7c41Storek struct msync_args *uap; 275808a4de1Smckusick int *retval; 276808a4de1Smckusick { 277808a4de1Smckusick vm_offset_t addr, objoff, oaddr; 278808a4de1Smckusick vm_size_t size, osize; 279808a4de1Smckusick vm_prot_t prot, mprot; 280808a4de1Smckusick vm_inherit_t inherit; 281808a4de1Smckusick vm_object_t object; 282808a4de1Smckusick boolean_t shared; 283808a4de1Smckusick int rv; 284808a4de1Smckusick 285808a4de1Smckusick #ifdef DEBUG 286808a4de1Smckusick if (mmapdebug & (MDB_FOLLOW|MDB_SYNC)) 287808a4de1Smckusick printf("msync(%d): addr %x len %x\n", 288808a4de1Smckusick p->p_pid, uap->addr, uap->len); 289808a4de1Smckusick #endif 290e2e72134Smckusick if (((int)uap->addr & PAGE_MASK) || uap->len < 0) 291808a4de1Smckusick return(EINVAL); 292808a4de1Smckusick addr = oaddr = (vm_offset_t)uap->addr; 
293808a4de1Smckusick osize = (vm_size_t)uap->len; 294808a4de1Smckusick /* 295808a4de1Smckusick * Region must be entirely contained in a single entry 296808a4de1Smckusick */ 297d2b14339Skarels if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize, 298d2b14339Skarels TRUE)) 299808a4de1Smckusick return(EINVAL); 300808a4de1Smckusick /* 301808a4de1Smckusick * Determine the object associated with that entry 302808a4de1Smckusick * (object is returned locked on KERN_SUCCESS) 303808a4de1Smckusick */ 304d2b14339Skarels rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot, 305808a4de1Smckusick &inherit, &shared, &object, &objoff); 306808a4de1Smckusick if (rv != KERN_SUCCESS) 307808a4de1Smckusick return(EINVAL); 308808a4de1Smckusick #ifdef DEBUG 309808a4de1Smckusick if (mmapdebug & MDB_SYNC) 310808a4de1Smckusick printf("msync: region: object %x addr %x size %d objoff %d\n", 311808a4de1Smckusick object, addr, size, objoff); 312808a4de1Smckusick #endif 313808a4de1Smckusick /* 314808a4de1Smckusick * Do not msync non-vnoded backed objects. 315808a4de1Smckusick */ 3166fa1e730Smckusick if ((object->flags & OBJ_INTERNAL) || object->pager == NULL || 317808a4de1Smckusick object->pager->pg_type != PG_VNODE) { 318808a4de1Smckusick vm_object_unlock(object); 319808a4de1Smckusick return(EINVAL); 320808a4de1Smckusick } 321808a4de1Smckusick objoff += oaddr - addr; 322808a4de1Smckusick if (osize == 0) 323808a4de1Smckusick osize = size; 324808a4de1Smckusick #ifdef DEBUG 325808a4de1Smckusick if (mmapdebug & MDB_SYNC) 326808a4de1Smckusick printf("msync: cleaning/flushing object range [%x-%x)\n", 327808a4de1Smckusick objoff, objoff+osize); 328808a4de1Smckusick #endif 329808a4de1Smckusick if (prot & VM_PROT_WRITE) 330ff2bfc3fShibler vm_object_page_clean(object, objoff, objoff+osize, FALSE); 331808a4de1Smckusick /* 332808a4de1Smckusick * (XXX) 333808a4de1Smckusick * Bummer, gotta flush all cached pages to ensure 334808a4de1Smckusick * consistency with the file system cache. 
335808a4de1Smckusick */ 336808a4de1Smckusick vm_object_page_remove(object, objoff, objoff+osize); 337808a4de1Smckusick vm_object_unlock(object); 338808a4de1Smckusick return(0); 339808a4de1Smckusick } 340808a4de1Smckusick 341499c7c41Storek struct munmap_args { 342499c7c41Storek caddr_t addr; 343499c7c41Storek int len; 344499c7c41Storek }; 34564f61df8Sbostic int 346808a4de1Smckusick munmap(p, uap, retval) 347808a4de1Smckusick register struct proc *p; 348499c7c41Storek register struct munmap_args *uap; 349808a4de1Smckusick int *retval; 350808a4de1Smckusick { 351808a4de1Smckusick vm_offset_t addr; 352808a4de1Smckusick vm_size_t size; 353808a4de1Smckusick 354808a4de1Smckusick #ifdef DEBUG 355808a4de1Smckusick if (mmapdebug & MDB_FOLLOW) 356808a4de1Smckusick printf("munmap(%d): addr %x len %x\n", 357808a4de1Smckusick p->p_pid, uap->addr, uap->len); 358808a4de1Smckusick #endif 359808a4de1Smckusick 360808a4de1Smckusick addr = (vm_offset_t) uap->addr; 361e2e72134Smckusick if ((addr & PAGE_MASK) || uap->len < 0) 362808a4de1Smckusick return(EINVAL); 3630c4a53bbShibler size = (vm_size_t) round_page(uap->len); 364808a4de1Smckusick if (size == 0) 365808a4de1Smckusick return(0); 3663e8d6014Smckusick /* 3673e8d6014Smckusick * Check for illegal addresses. Watch out for address wrap... 3683e8d6014Smckusick * Note that VM_*_ADDRESS are not constants due to casts (argh). 
3693e8d6014Smckusick */ 3703e8d6014Smckusick if (VM_MAXUSER_ADDRESS > 0 && addr + size >= VM_MAXUSER_ADDRESS) 3713e8d6014Smckusick return (EINVAL); 3723e8d6014Smckusick if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS) 3733e8d6014Smckusick return (EINVAL); 3743e8d6014Smckusick if (addr > addr + size) 3753e8d6014Smckusick return (EINVAL); 376d2b14339Skarels if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr + size, 377d2b14339Skarels FALSE)) 378808a4de1Smckusick return(EINVAL); 379808a4de1Smckusick /* returns nothing but KERN_SUCCESS anyway */ 380d2b14339Skarels (void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size); 381808a4de1Smckusick return(0); 382808a4de1Smckusick } 383808a4de1Smckusick 38464f61df8Sbostic void 385808a4de1Smckusick munmapfd(fd) 386a5fa67a9Smckusick int fd; 387808a4de1Smckusick { 388808a4de1Smckusick #ifdef DEBUG 389808a4de1Smckusick if (mmapdebug & MDB_FOLLOW) 390d2b14339Skarels printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd); 391808a4de1Smckusick #endif 392808a4de1Smckusick 393808a4de1Smckusick /* 394808a4de1Smckusick * XXX -- should vm_deallocate any regions mapped to this file 395808a4de1Smckusick */ 396d2b14339Skarels curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED; 397808a4de1Smckusick } 398808a4de1Smckusick 399499c7c41Storek struct mprotect_args { 4000c4a53bbShibler caddr_t addr; 401808a4de1Smckusick int len; 402808a4de1Smckusick int prot; 403499c7c41Storek }; 404499c7c41Storek int 405499c7c41Storek mprotect(p, uap, retval) 406499c7c41Storek struct proc *p; 407499c7c41Storek struct mprotect_args *uap; 408808a4de1Smckusick int *retval; 409808a4de1Smckusick { 410808a4de1Smckusick vm_offset_t addr; 411808a4de1Smckusick vm_size_t size; 412808a4de1Smckusick register vm_prot_t prot; 413808a4de1Smckusick 414808a4de1Smckusick #ifdef DEBUG 415808a4de1Smckusick if (mmapdebug & MDB_FOLLOW) 416808a4de1Smckusick printf("mprotect(%d): addr %x len %x prot %d\n", 417808a4de1Smckusick p->p_pid, uap->addr, uap->len, uap->prot); 
418808a4de1Smckusick #endif 419808a4de1Smckusick 420808a4de1Smckusick addr = (vm_offset_t)uap->addr; 421e2e72134Smckusick if ((addr & PAGE_MASK) || uap->len < 0) 422808a4de1Smckusick return(EINVAL); 4230c4a53bbShibler size = (vm_size_t)uap->len; 424525ac35aShibler prot = uap->prot & VM_PROT_ALL; 425808a4de1Smckusick 426d2b14339Skarels switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot, 427d2b14339Skarels FALSE)) { 428808a4de1Smckusick case KERN_SUCCESS: 429808a4de1Smckusick return (0); 430808a4de1Smckusick case KERN_PROTECTION_FAILURE: 431808a4de1Smckusick return (EACCES); 432808a4de1Smckusick } 433808a4de1Smckusick return (EINVAL); 434808a4de1Smckusick } 435808a4de1Smckusick 436499c7c41Storek struct madvise_args { 437499c7c41Storek caddr_t addr; 438499c7c41Storek int len; 439499c7c41Storek int behav; 440499c7c41Storek }; 441808a4de1Smckusick /* ARGSUSED */ 44264f61df8Sbostic int 443808a4de1Smckusick madvise(p, uap, retval) 444808a4de1Smckusick struct proc *p; 445499c7c41Storek struct madvise_args *uap; 446808a4de1Smckusick int *retval; 447808a4de1Smckusick { 448808a4de1Smckusick 449808a4de1Smckusick /* Not yet implemented */ 450808a4de1Smckusick return (EOPNOTSUPP); 451808a4de1Smckusick } 452808a4de1Smckusick 453499c7c41Storek struct mincore_args { 454499c7c41Storek caddr_t addr; 455499c7c41Storek int len; 456499c7c41Storek char *vec; 457499c7c41Storek }; 458808a4de1Smckusick /* ARGSUSED */ 45964f61df8Sbostic int 460808a4de1Smckusick mincore(p, uap, retval) 461808a4de1Smckusick struct proc *p; 462499c7c41Storek struct mincore_args *uap; 463808a4de1Smckusick int *retval; 464808a4de1Smckusick { 465808a4de1Smckusick 466808a4de1Smckusick /* Not yet implemented */ 467808a4de1Smckusick return (EOPNOTSUPP); 468808a4de1Smckusick } 469808a4de1Smckusick 47080376accShibler struct mlock_args { 47180376accShibler caddr_t addr; 472b3b90abdSmckusick size_t len; 47380376accShibler }; 47480376accShibler int 47580376accShibler mlock(p, uap, retval) 
47680376accShibler struct proc *p; 47780376accShibler struct mlock_args *uap; 47880376accShibler int *retval; 47980376accShibler { 48080376accShibler vm_offset_t addr; 48180376accShibler vm_size_t size; 48280376accShibler int error; 48380376accShibler extern int vm_page_max_wired; 48480376accShibler 48580376accShibler #ifdef DEBUG 48680376accShibler if (mmapdebug & MDB_FOLLOW) 48780376accShibler printf("mlock(%d): addr %x len %x\n", 48880376accShibler p->p_pid, uap->addr, uap->len); 48980376accShibler #endif 49080376accShibler addr = (vm_offset_t)uap->addr; 491bffd8654Smckusick if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr) 49280376accShibler return (EINVAL); 49380376accShibler size = round_page((vm_size_t)uap->len); 49480376accShibler if (atop(size) + cnt.v_wire_count > vm_page_max_wired) 4958cb0bc2dShibler return (EAGAIN); 49680376accShibler #ifdef pmap_wired_count 49780376accShibler if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) > 49880376accShibler p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur) 4998cb0bc2dShibler return (EAGAIN); 50080376accShibler #else 50180376accShibler if (error = suser(p->p_ucred, &p->p_acflag)) 50280376accShibler return (error); 50380376accShibler #endif 50480376accShibler 50580376accShibler error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE); 50680376accShibler return (error == KERN_SUCCESS ? 
0 : ENOMEM); 50780376accShibler } 50880376accShibler 50980376accShibler struct munlock_args { 51080376accShibler caddr_t addr; 511b3b90abdSmckusick size_t len; 51280376accShibler }; 51380376accShibler int 51480376accShibler munlock(p, uap, retval) 51580376accShibler struct proc *p; 51680376accShibler struct munlock_args *uap; 51780376accShibler int *retval; 51880376accShibler { 51980376accShibler vm_offset_t addr; 52080376accShibler vm_size_t size; 52180376accShibler int error; 52280376accShibler 52380376accShibler #ifdef DEBUG 52480376accShibler if (mmapdebug & MDB_FOLLOW) 52580376accShibler printf("munlock(%d): addr %x len %x\n", 52680376accShibler p->p_pid, uap->addr, uap->len); 52780376accShibler #endif 52880376accShibler addr = (vm_offset_t)uap->addr; 529bffd8654Smckusick if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr) 53080376accShibler return (EINVAL); 53180376accShibler #ifndef pmap_wired_count 53280376accShibler if (error = suser(p->p_ucred, &p->p_acflag)) 53380376accShibler return (error); 53480376accShibler #endif 53580376accShibler size = round_page((vm_size_t)uap->len); 53680376accShibler 53780376accShibler error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE); 53880376accShibler return (error == KERN_SUCCESS ? 0 : ENOMEM); 53980376accShibler } 54080376accShibler 541808a4de1Smckusick /* 542808a4de1Smckusick * Internal version of mmap. 543808a4de1Smckusick * Currently used by mmap, exec, and sys5 shared memory. 5449cf4a8a8Smckusick * Handle is either a vnode pointer or NULL for MAP_ANON. 
545808a4de1Smckusick */ 54664f61df8Sbostic int 547525ac35aShibler vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff) 548808a4de1Smckusick register vm_map_t map; 549808a4de1Smckusick register vm_offset_t *addr; 550808a4de1Smckusick register vm_size_t size; 551525ac35aShibler vm_prot_t prot, maxprot; 552808a4de1Smckusick register int flags; 553808a4de1Smckusick caddr_t handle; /* XXX should be vp */ 554808a4de1Smckusick vm_offset_t foff; 555808a4de1Smckusick { 556808a4de1Smckusick register vm_pager_t pager; 557808a4de1Smckusick boolean_t fitit; 558808a4de1Smckusick vm_object_t object; 559bffd8654Smckusick struct vnode *vp = NULL; 560808a4de1Smckusick int type; 561808a4de1Smckusick int rv = KERN_SUCCESS; 562808a4de1Smckusick 563808a4de1Smckusick if (size == 0) 564808a4de1Smckusick return (0); 565808a4de1Smckusick 566808a4de1Smckusick if ((flags & MAP_FIXED) == 0) { 567808a4de1Smckusick fitit = TRUE; 568808a4de1Smckusick *addr = round_page(*addr); 569808a4de1Smckusick } else { 570808a4de1Smckusick fitit = FALSE; 571808a4de1Smckusick (void)vm_deallocate(map, *addr, size); 572808a4de1Smckusick } 573808a4de1Smckusick 574808a4de1Smckusick /* 575808a4de1Smckusick * Lookup/allocate pager. All except an unnamed anonymous lookup 576808a4de1Smckusick * gain a reference to ensure continued existance of the object. 577808a4de1Smckusick * (XXX the exception is to appease the pageout daemon) 578808a4de1Smckusick */ 5799cf4a8a8Smckusick if (flags & MAP_ANON) 580808a4de1Smckusick type = PG_DFLT; 581808a4de1Smckusick else { 582808a4de1Smckusick vp = (struct vnode *)handle; 583808a4de1Smckusick if (vp->v_type == VCHR) { 584808a4de1Smckusick type = PG_DEVICE; 585808a4de1Smckusick handle = (caddr_t)vp->v_rdev; 586808a4de1Smckusick } else 587808a4de1Smckusick type = PG_VNODE; 588808a4de1Smckusick } 589eed4f36eStorek pager = vm_pager_allocate(type, handle, size, prot, foff); 590d2b14339Skarels if (pager == NULL) 591808a4de1Smckusick return (type == PG_DEVICE ? 
EINVAL : ENOMEM); 592808a4de1Smckusick /* 593808a4de1Smckusick * Find object and release extra reference gained by lookup 594808a4de1Smckusick */ 595808a4de1Smckusick object = vm_object_lookup(pager); 596808a4de1Smckusick vm_object_deallocate(object); 597808a4de1Smckusick 598808a4de1Smckusick /* 599808a4de1Smckusick * Anonymous memory. 600808a4de1Smckusick */ 6019cf4a8a8Smckusick if (flags & MAP_ANON) { 602808a4de1Smckusick rv = vm_allocate_with_pager(map, addr, size, fitit, 603af75fa5dSralph pager, foff, TRUE); 604808a4de1Smckusick if (rv != KERN_SUCCESS) { 605808a4de1Smckusick if (handle == NULL) 606808a4de1Smckusick vm_pager_deallocate(pager); 607808a4de1Smckusick else 608808a4de1Smckusick vm_object_deallocate(object); 609808a4de1Smckusick goto out; 610808a4de1Smckusick } 611808a4de1Smckusick /* 612808a4de1Smckusick * Don't cache anonymous objects. 613808a4de1Smckusick * Loses the reference gained by vm_pager_allocate. 614525ac35aShibler * Note that object will be NULL when handle == NULL, 615525ac35aShibler * this is ok since vm_allocate_with_pager has made 616525ac35aShibler * sure that these objects are uncached. 617808a4de1Smckusick */ 618808a4de1Smckusick (void) pager_cache(object, FALSE); 619808a4de1Smckusick #ifdef DEBUG 620808a4de1Smckusick if (mmapdebug & MDB_MAPIT) 621808a4de1Smckusick printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n", 622d2b14339Skarels curproc->p_pid, *addr, size, pager); 623808a4de1Smckusick #endif 624808a4de1Smckusick } 625808a4de1Smckusick /* 6269cf4a8a8Smckusick * Must be a mapped file. 627808a4de1Smckusick * Distinguish between character special and regular files. 628808a4de1Smckusick */ 629808a4de1Smckusick else if (vp->v_type == VCHR) { 630808a4de1Smckusick rv = vm_allocate_with_pager(map, addr, size, fitit, 631af75fa5dSralph pager, foff, FALSE); 632808a4de1Smckusick /* 633808a4de1Smckusick * Uncache the object and lose the reference gained 634808a4de1Smckusick * by vm_pager_allocate(). 
If the call to 635808a4de1Smckusick * vm_allocate_with_pager() was sucessful, then we 636808a4de1Smckusick * gained an additional reference ensuring the object 637808a4de1Smckusick * will continue to exist. If the call failed then 638808a4de1Smckusick * the deallocate call below will terminate the 639808a4de1Smckusick * object which is fine. 640808a4de1Smckusick */ 641808a4de1Smckusick (void) pager_cache(object, FALSE); 642808a4de1Smckusick if (rv != KERN_SUCCESS) 643808a4de1Smckusick goto out; 644808a4de1Smckusick } 645808a4de1Smckusick /* 646808a4de1Smckusick * A regular file 647808a4de1Smckusick */ 648808a4de1Smckusick else { 649808a4de1Smckusick #ifdef DEBUG 650d2b14339Skarels if (object == NULL) 651808a4de1Smckusick printf("vm_mmap: no object: vp %x, pager %x\n", 652808a4de1Smckusick vp, pager); 653808a4de1Smckusick #endif 654808a4de1Smckusick /* 655808a4de1Smckusick * Map it directly. 656808a4de1Smckusick * Allows modifications to go out to the vnode. 657808a4de1Smckusick */ 658808a4de1Smckusick if (flags & MAP_SHARED) { 659808a4de1Smckusick rv = vm_allocate_with_pager(map, addr, size, 660808a4de1Smckusick fitit, pager, 661af75fa5dSralph foff, FALSE); 662808a4de1Smckusick if (rv != KERN_SUCCESS) { 663808a4de1Smckusick vm_object_deallocate(object); 664808a4de1Smckusick goto out; 665808a4de1Smckusick } 666808a4de1Smckusick /* 667808a4de1Smckusick * Don't cache the object. This is the easiest way 668808a4de1Smckusick * of ensuring that data gets back to the filesystem 669808a4de1Smckusick * because vnode_pager_deallocate() will fsync the 670808a4de1Smckusick * vnode. pager_cache() will lose the extra ref. 671808a4de1Smckusick */ 672808a4de1Smckusick if (prot & VM_PROT_WRITE) 673808a4de1Smckusick pager_cache(object, FALSE); 674808a4de1Smckusick else 675808a4de1Smckusick vm_object_deallocate(object); 676808a4de1Smckusick } 677808a4de1Smckusick /* 678808a4de1Smckusick * Copy-on-write of file. Two flavors. 
679808a4de1Smckusick * MAP_COPY is true COW, you essentially get a snapshot of 680808a4de1Smckusick * the region at the time of mapping. MAP_PRIVATE means only 681808a4de1Smckusick * that your changes are not reflected back to the object. 682808a4de1Smckusick * Changes made by others will be seen. 683808a4de1Smckusick */ 684808a4de1Smckusick else { 685808a4de1Smckusick vm_map_t tmap; 686808a4de1Smckusick vm_offset_t off; 687808a4de1Smckusick 688808a4de1Smckusick /* locate and allocate the target address space */ 689d2b14339Skarels rv = vm_map_find(map, NULL, (vm_offset_t)0, 690808a4de1Smckusick addr, size, fitit); 691808a4de1Smckusick if (rv != KERN_SUCCESS) { 692808a4de1Smckusick vm_object_deallocate(object); 693808a4de1Smckusick goto out; 694808a4de1Smckusick } 695808a4de1Smckusick tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS, 696808a4de1Smckusick VM_MIN_ADDRESS+size, TRUE); 697808a4de1Smckusick off = VM_MIN_ADDRESS; 698808a4de1Smckusick rv = vm_allocate_with_pager(tmap, &off, size, 699808a4de1Smckusick TRUE, pager, 700af75fa5dSralph foff, FALSE); 701808a4de1Smckusick if (rv != KERN_SUCCESS) { 702808a4de1Smckusick vm_object_deallocate(object); 703808a4de1Smckusick vm_map_deallocate(tmap); 704808a4de1Smckusick goto out; 705808a4de1Smckusick } 706808a4de1Smckusick /* 707808a4de1Smckusick * (XXX) 708808a4de1Smckusick * MAP_PRIVATE implies that we see changes made by 709808a4de1Smckusick * others. To ensure that we need to guarentee that 710808a4de1Smckusick * no copy object is created (otherwise original 711808a4de1Smckusick * pages would be pushed to the copy object and we 712808a4de1Smckusick * would never see changes made by others). We 713808a4de1Smckusick * totally sleeze it right now by marking the object 714808a4de1Smckusick * internal temporarily. 
715808a4de1Smckusick */ 716808a4de1Smckusick if ((flags & MAP_COPY) == 0) 7176fa1e730Smckusick object->flags |= OBJ_INTERNAL; 718808a4de1Smckusick rv = vm_map_copy(map, tmap, *addr, size, off, 719808a4de1Smckusick FALSE, FALSE); 7206fa1e730Smckusick object->flags &= ~OBJ_INTERNAL; 721808a4de1Smckusick /* 722808a4de1Smckusick * (XXX) 723808a4de1Smckusick * My oh my, this only gets worse... 724808a4de1Smckusick * Force creation of a shadow object so that 725808a4de1Smckusick * vm_map_fork will do the right thing. 726808a4de1Smckusick */ 727808a4de1Smckusick if ((flags & MAP_COPY) == 0) { 728808a4de1Smckusick vm_map_t tmap; 729808a4de1Smckusick vm_map_entry_t tentry; 730808a4de1Smckusick vm_object_t tobject; 731808a4de1Smckusick vm_offset_t toffset; 732808a4de1Smckusick vm_prot_t tprot; 733808a4de1Smckusick boolean_t twired, tsu; 734808a4de1Smckusick 735808a4de1Smckusick tmap = map; 736808a4de1Smckusick vm_map_lookup(&tmap, *addr, VM_PROT_WRITE, 737808a4de1Smckusick &tentry, &tobject, &toffset, 738808a4de1Smckusick &tprot, &twired, &tsu); 739808a4de1Smckusick vm_map_lookup_done(tmap, tentry); 740808a4de1Smckusick } 741808a4de1Smckusick /* 742808a4de1Smckusick * (XXX) 743808a4de1Smckusick * Map copy code cannot detect sharing unless a 744808a4de1Smckusick * sharing map is involved. So we cheat and write 74586863962Shibler * protect everything ourselves. 
746808a4de1Smckusick */ 747af75fa5dSralph vm_object_pmap_copy(object, foff, foff + size); 748808a4de1Smckusick vm_object_deallocate(object); 749808a4de1Smckusick vm_map_deallocate(tmap); 750808a4de1Smckusick if (rv != KERN_SUCCESS) 751808a4de1Smckusick goto out; 752808a4de1Smckusick } 753808a4de1Smckusick #ifdef DEBUG 754808a4de1Smckusick if (mmapdebug & MDB_MAPIT) 755808a4de1Smckusick printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n", 756d2b14339Skarels curproc->p_pid, *addr, size, pager); 757808a4de1Smckusick #endif 758808a4de1Smckusick } 759808a4de1Smckusick /* 760808a4de1Smckusick * Correct protection (default is VM_PROT_ALL). 761525ac35aShibler * If maxprot is different than prot, we must set both explicitly. 762808a4de1Smckusick */ 763525ac35aShibler rv = KERN_SUCCESS; 764525ac35aShibler if (maxprot != VM_PROT_ALL) 765525ac35aShibler rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE); 766525ac35aShibler if (rv == KERN_SUCCESS && prot != maxprot) 767808a4de1Smckusick rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE); 768808a4de1Smckusick if (rv != KERN_SUCCESS) { 769808a4de1Smckusick (void) vm_deallocate(map, *addr, size); 770808a4de1Smckusick goto out; 771808a4de1Smckusick } 772808a4de1Smckusick /* 773808a4de1Smckusick * Shared memory is also shared with children. 
774808a4de1Smckusick */ 775808a4de1Smckusick if (flags & MAP_SHARED) { 776808a4de1Smckusick rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE); 777808a4de1Smckusick if (rv != KERN_SUCCESS) { 778808a4de1Smckusick (void) vm_deallocate(map, *addr, size); 779808a4de1Smckusick goto out; 780808a4de1Smckusick } 781808a4de1Smckusick } 782808a4de1Smckusick out: 783808a4de1Smckusick #ifdef DEBUG 784808a4de1Smckusick if (mmapdebug & MDB_MAPIT) 785808a4de1Smckusick printf("vm_mmap: rv %d\n", rv); 786808a4de1Smckusick #endif 787808a4de1Smckusick switch (rv) { 788808a4de1Smckusick case KERN_SUCCESS: 789808a4de1Smckusick return (0); 790808a4de1Smckusick case KERN_INVALID_ADDRESS: 791808a4de1Smckusick case KERN_NO_SPACE: 792808a4de1Smckusick return (ENOMEM); 793808a4de1Smckusick case KERN_PROTECTION_FAILURE: 794808a4de1Smckusick return (EACCES); 795808a4de1Smckusick default: 796808a4de1Smckusick return (EINVAL); 797808a4de1Smckusick } 798808a4de1Smckusick } 799808a4de1Smckusick 800808a4de1Smckusick /* 801808a4de1Smckusick * Internal bastardized version of MACHs vm_region system call. 802808a4de1Smckusick * Given address and size it returns map attributes as well 803808a4de1Smckusick * as the (locked) object mapped at that location. 
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	/*
	 * If no entry contains the requested address, round up to the
	 * next allocated entry (reporting its start back through *addr);
	 * fail if there is nothing at or beyond the given address.
	 */
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	/* Offset of "start" within the entry's backing object/share map. */
	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		/*
		 * Entry references a sharing map: descend one level (the
		 * share map is read-locked while the top-level map lock is
		 * still held) and clip the reported size to the share-map
		 * entry found there.
		 */
		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		/*
		 * NOTE(review): vm_object_lock is handed the object union
		 * itself here (and in the else arm below), not the
		 * .vm_object member -- verify the lock macro tolerates this.
		 */
		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		/* Shared iff someone else also references the share map. */
		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	/* Success: *object is returned locked, per the header comment. */
	return(KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
880808a4de1Smckusick */ 88164f61df8Sbostic int 882808a4de1Smckusick vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal) 883808a4de1Smckusick register vm_map_t map; 884808a4de1Smckusick register vm_offset_t *addr; 885808a4de1Smckusick register vm_size_t size; 886808a4de1Smckusick boolean_t fitit; 887808a4de1Smckusick vm_pager_t pager; 888808a4de1Smckusick vm_offset_t poffset; 889808a4de1Smckusick boolean_t internal; 890808a4de1Smckusick { 891808a4de1Smckusick register vm_object_t object; 892808a4de1Smckusick register int result; 893808a4de1Smckusick 894d2b14339Skarels if (map == NULL) 895808a4de1Smckusick return(KERN_INVALID_ARGUMENT); 896808a4de1Smckusick 897808a4de1Smckusick *addr = trunc_page(*addr); 898808a4de1Smckusick size = round_page(size); 899808a4de1Smckusick 900808a4de1Smckusick /* 901808a4de1Smckusick * Lookup the pager/paging-space in the object cache. 902808a4de1Smckusick * If it's not there, then create a new object and cache 903808a4de1Smckusick * it. 904808a4de1Smckusick */ 905808a4de1Smckusick object = vm_object_lookup(pager); 9066fa1e730Smckusick cnt.v_lookups++; 907d2b14339Skarels if (object == NULL) { 908808a4de1Smckusick object = vm_object_allocate(size); 909a5fa67a9Smckusick /* 910a5fa67a9Smckusick * From Mike Hibler: "unnamed anonymous objects should never 911a5fa67a9Smckusick * be on the hash list ... For now you can just change 912a5fa67a9Smckusick * vm_allocate_with_pager to not do vm_object_enter if this 913a5fa67a9Smckusick * is an internal object ..." 
914a5fa67a9Smckusick */ 915a5fa67a9Smckusick if (!internal) 916808a4de1Smckusick vm_object_enter(object, pager); 917808a4de1Smckusick } else 9186fa1e730Smckusick cnt.v_hits++; 9196fa1e730Smckusick if (internal) 9206fa1e730Smckusick object->flags |= OBJ_INTERNAL; 9216fa1e730Smckusick else 9226fa1e730Smckusick object->flags &= ~OBJ_INTERNAL; 923808a4de1Smckusick 924808a4de1Smckusick result = vm_map_find(map, object, poffset, addr, size, fitit); 925808a4de1Smckusick if (result != KERN_SUCCESS) 926808a4de1Smckusick vm_object_deallocate(object); 927d2b14339Skarels else if (pager != NULL) 928808a4de1Smckusick vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE); 929808a4de1Smckusick return(result); 930808a4de1Smckusick } 931808a4de1Smckusick 932808a4de1Smckusick /* 933808a4de1Smckusick * XXX: this routine belongs in vm_map.c. 934808a4de1Smckusick * 935808a4de1Smckusick * Returns TRUE if the range [start - end) is allocated in either 936808a4de1Smckusick * a single entry (single_entry == TRUE) or multiple contiguous 937808a4de1Smckusick * entries (single_entry == FALSE). 938808a4de1Smckusick * 939808a4de1Smckusick * start and end should be page aligned. 
940808a4de1Smckusick */ 941808a4de1Smckusick boolean_t 942808a4de1Smckusick vm_map_is_allocated(map, start, end, single_entry) 943808a4de1Smckusick vm_map_t map; 944808a4de1Smckusick vm_offset_t start, end; 945808a4de1Smckusick boolean_t single_entry; 946808a4de1Smckusick { 947808a4de1Smckusick vm_map_entry_t mapent; 948808a4de1Smckusick register vm_offset_t nend; 949808a4de1Smckusick 950808a4de1Smckusick vm_map_lock_read(map); 951808a4de1Smckusick 952808a4de1Smckusick /* 953808a4de1Smckusick * Start address not in any entry 954808a4de1Smckusick */ 955808a4de1Smckusick if (!vm_map_lookup_entry(map, start, &mapent)) { 956808a4de1Smckusick vm_map_unlock_read(map); 957808a4de1Smckusick return (FALSE); 958808a4de1Smckusick } 959808a4de1Smckusick /* 960808a4de1Smckusick * Find the maximum stretch of contiguously allocated space 961808a4de1Smckusick */ 962808a4de1Smckusick nend = mapent->end; 963808a4de1Smckusick if (!single_entry) { 964808a4de1Smckusick mapent = mapent->next; 965808a4de1Smckusick while (mapent != &map->header && mapent->start == nend) { 966808a4de1Smckusick nend = mapent->end; 967808a4de1Smckusick mapent = mapent->next; 968808a4de1Smckusick } 969808a4de1Smckusick } 970808a4de1Smckusick 971808a4de1Smckusick vm_map_unlock_read(map); 972808a4de1Smckusick return (end <= nend); 973808a4de1Smckusick } 974