/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	7.11 (Berkeley) 06/19/92
 */

/*
 * Mapped file (mmap) interface to VM
 */
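
/*
 * Illustrative only: a typical user-level sequence that ends up in
 * smmap() below (the fd and length here are hypothetical):
 *
 *	fd = open("/tmp/file", O_RDWR);
 *	buf = mmap((caddr_t)0, 8192, PROT_READ|PROT_WRITE,
 *	    MAP_FILE|MAP_SHARED, fd, (off_t)0);
 *
 * With no hint address and no MAP_FIXED, the kernel chooses where
 * the mapping goes.
 */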

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/specdev.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
int
getpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	int *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}

/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

int
smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) must be page aligned.
	 * Size is implicitly rounded to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((uap->flags & MAP_FIXED) && (addr & PAGE_MASK)) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	/*
	 * XXX if no hint is provided for a non-fixed mapping, place it
	 * after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	if (addr == 0 && (uap->flags & MAP_FIXED) == 0)
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if (((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0) ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}

int
msync(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize,
	    TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not vnode-backed.
	 */
	if ((object->flags & OBJ_INTERNAL) || object->pager == NULL ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
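	/*
	 * Fold any difference between the caller's address and the
	 * address vm_region() settled on back into the object offset;
	 * a zero length means sync the rest of the region.
	 */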
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize, FALSE);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}

int
munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size,
	    FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size);
	return(0);
}

void
munmapfd(fd)
	int fd;
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", curproc->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	curproc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

int
mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return(EINVAL);
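	/* XXX note that unlike munmap(), the length is not page rounded here */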
	size = (vm_size_t) uap->len;
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
int
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

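	/*
	 * With MAP_FIXED the address is taken exactly as given and any
	 * existing mapping in that range is deallocated first; otherwise
	 * the (rounded) address is only a hint and the map code is free
	 * to place the mapping wherever it fits.
	 */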
	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All except an unnamed anonymous lookup
	 * gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object, which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW: you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
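			/*
			 * Map the file into a throwaway map first, then
			 * copy that mapping into the space just reserved
			 * in the target map; the copy is what gives us
			 * the copy-on-write semantics.
			 */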
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->flags |= OBJ_INTERNAL;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->flags &= ~OBJ_INTERNAL;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.
			 */
			vm_object_pmap_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       curproc->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given an address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
int
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

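	/*
	 * If the entry references a sharing map, chase down one more
	 * level to find the object actually mapped at this address;
	 * otherwise the entry names the object directly.
	 */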
	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object.vm_object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object.vm_object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
 */
int
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t	map;
	register vm_offset_t	*addr;
	register vm_size_t	size;
	boolean_t		fitit;
	vm_pager_t		pager;
	vm_offset_t		poffset;
	boolean_t		internal;
{
	register vm_object_t	object;
	register int		result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ... For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else
		object->flags &= ~OBJ_INTERNAL;

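	/*
	 * Enter the object in the target map.  On failure drop the
	 * reference gained above; on success attach the pager so the
	 * object can page against its backing store.
	 */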
	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}