/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$
 *
 *	@(#)vm_mmap.c	7.2 (Berkeley) 01/10/91
 */

/*
 * Mapped file (mmap) interface to VM
 */
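
/*
 * These are the system call entry points for the user-level mapping
 * interface.  A rough sketch of how they are reached from user code
 * (names and argument values are illustrative only):
 *
 *	addr = mmap(hint, len, PROT_READ|PROT_WRITE,
 *		    MAP_FILE|MAP_SHARED, fd, off);	(smmap below)
 *	msync(addr, len);				(msync below)
 *	munmap(addr, len);				(munmap below)
 *
 * smmap() itself does the real mapping work via vm_mmap() below.
 */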

#include "param.h"
#include "systm.h"
#include "user.h"
#include "filedesc.h"
#include "proc.h"
#include "vnode.h"
#include "specdev.h"
#include "file.h"
#include "mman.h"
#include "conf.h"

#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_pager.h"
#include "../vm/vm_prot.h"

#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW	0x01
#define MDB_SYNC	0x02
#define MDB_MAPIT	0x04
#endif

/* ARGSUSED */
getpagesize(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int *retval;
{

	*retval = NBPG * CLSIZE;
	return (0);
}

/* ARGSUSED */
sbrk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
sstk(p, uap, retval)
	struct proc *p;
	struct args {
		int	incr;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

smmap(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
		int	prot;
		int	flags;
		int	fd;
		off_t	pos;
	} *uap;
	int *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot;
	caddr_t handle;
	int mtype, error;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
		       p->p_pid, uap->addr, uap->len, uap->prot,
		       uap->flags, uap->fd, uap->pos);
#endif
	/*
	 * Make sure one of the sharing types is specified
	 */
	mtype = uap->flags & MAP_TYPE;
	switch (mtype) {
	case MAP_FILE:
	case MAP_ANON:
		break;
	default:
		return(EINVAL);
	}
	/*
	 * Address (if FIXED) and size must be page aligned
	 */
	size = (vm_size_t)uap->len;
	addr = (vm_offset_t)uap->addr;
	if ((size & page_mask) ||
	    (uap->flags & MAP_FIXED) && (addr & page_mask))
		return(EINVAL);
	/*
	 * Mapping file or named anonymous, get fp for validation
	 */
	if (mtype == MAP_FILE || uap->fd != -1) {
		if (((unsigned)uap->fd) >= fdp->fd_maxfiles ||
		    (fp = OFILE(fdp, uap->fd)) == NULL)
			return(EBADF);
	}
	/*
	 * If we are mapping a file we need to check various
	 * file/vnode related things.
	 */
	if (mtype == MAP_FILE) {
		/*
		 * Obtain vnode and make sure it is of appropriate type
		 */
		if (fp->f_type != DTYPE_VNODE)
			return(EINVAL);
		vp = (struct vnode *)fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return(EINVAL);
		/*
		 * Ensure that file protection and desired protection
		 * are compatible.  Note that we only worry about writability
		 * if mapping is shared.
		 */
		if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 ||
		    ((uap->flags & MAP_SHARED) &&
		     (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0))
			return(EACCES);
		handle = (caddr_t)vp;
	} else if (uap->fd != -1)
		handle = (caddr_t)fp;
	else
		handle = NULL;
	/*
	 * Map protections to MACH style
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	error = vm_mmap(p->p_map, &addr, size, prot,
			uap->flags, handle, (vm_offset_t)uap->pos);
	if (error == 0)
		*retval = (int) addr;
	return(error);
}

msync(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr, objoff, oaddr;
	vm_size_t size, osize;
	vm_prot_t prot, mprot;
	vm_inherit_t inherit;
	vm_object_t object;
	boolean_t shared;
	int rv;

#ifdef DEBUG
	if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
		printf("msync(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif
	if (((int)uap->addr & page_mask) || (uap->len & page_mask))
		return(EINVAL);
	addr = oaddr = (vm_offset_t)uap->addr;
	osize = (vm_size_t)uap->len;
	/*
	 * Region must be entirely contained in a single entry
	 */
	if (!vm_map_is_allocated(p->p_map, addr, addr+osize, TRUE))
		return(EINVAL);
	/*
	 * Determine the object associated with that entry
	 * (object is returned locked on KERN_SUCCESS)
	 */
	rv = vm_region(p->p_map, &addr, &size, &prot, &mprot,
		       &inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return(EINVAL);
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: region: object %x addr %x size %d objoff %d\n",
		       object, addr, size, objoff);
#endif
	/*
	 * Do not msync objects that are not vnode-backed.
	 */
	if (object->internal || object->pager == vm_pager_null ||
	    object->pager->pg_type != PG_VNODE) {
		vm_object_unlock(object);
		return(EINVAL);
	}
	objoff += oaddr - addr;
	if (osize == 0)
		osize = size;
#ifdef DEBUG
	if (mmapdebug & MDB_SYNC)
		printf("msync: cleaning/flushing object range [%x-%x)\n",
		       objoff, objoff+osize);
#endif
	if (prot & VM_PROT_WRITE)
		vm_object_page_clean(object, objoff, objoff+osize);
	/*
	 * (XXX)
	 * Bummer, gotta flush all cached pages to ensure
	 * consistency with the file system cache.
	 */
	vm_object_page_remove(object, objoff, objoff+osize);
	vm_object_unlock(object);
	return(0);
}

munmap(p, uap, retval)
	register struct proc *p;
	register struct args {
		caddr_t	addr;
		int	len;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmap(%d): addr %x len %x\n",
		       p->p_pid, uap->addr, uap->len);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	if (size == 0)
		return(0);
	if (!vm_map_is_allocated(p->p_map, addr, addr+size, FALSE))
		return(EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(p->p_map, addr, addr+size);
	return(0);
}

munmapfd(fd)
{
#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("munmapfd(%d): fd %d\n", u.u_procp->p_pid, fd);
#endif

	/*
	 * XXX -- should vm_deallocate any regions mapped to this file
	 */
	OFILEFLAGS(u.u_procp->p_fd, fd) &= ~UF_MAPPED;
}

mprotect(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	prot;
	} *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

#ifdef DEBUG
	if (mmapdebug & MDB_FOLLOW)
		printf("mprotect(%d): addr %x len %x prot %d\n",
		       p->p_pid, uap->addr, uap->len, uap->prot);
#endif

	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	if ((addr & page_mask) || (size & page_mask))
		return(EINVAL);
	/*
	 * Map protections
	 */
	prot = VM_PROT_NONE;
	if (uap->prot & PROT_READ)
		prot |= VM_PROT_READ;
	if (uap->prot & PROT_WRITE)
		prot |= VM_PROT_WRITE;
	if (uap->prot & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;

	switch (vm_map_protect(p->p_map, addr, addr+size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/* ARGSUSED */
madvise(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		int	behav;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/* ARGSUSED */
mincore(p, uap, retval)
	struct proc *p;
	struct args {
		char	*addr;
		int	len;
		char	*vec;
	} *uap;
	int *retval;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is:
 *	MAP_FILE: a vnode pointer
 *	MAP_ANON: NULL or a file pointer
 */
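/*
 * Typical call (see smmap() above; the names here are illustrative):
 *
 *	vm_offset_t addr = hint;
 *	error = vm_mmap(p->p_map, &addr, size, VM_PROT_READ|VM_PROT_WRITE,
 *			MAP_FILE|MAP_SHARED, (caddr_t)vp, foff);
 *
 * On success, addr holds the address actually mapped; when MAP_FIXED
 * is not specified the kernel is free to round or relocate the hint.
 */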
vm_mmap(map, addr, size, prot, flags, handle, foff)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot;
	register int flags;
	caddr_t handle;		/* XXX should be vp */
	vm_offset_t foff;
{
	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp;
	int type;
	int rv = KERN_SUCCESS;

	if (size == 0)
		return (0);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		fitit = FALSE;
		(void) vm_deallocate(map, *addr, size);
	}

	/*
	 * Lookup/allocate pager.  All lookups except an unnamed anonymous
	 * one gain a reference to ensure continued existence of the object.
	 * (XXX the exception is to appease the pageout daemon)
	 */
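	/*
	 * Pager selection, summarizing the code below: anonymous memory
	 * uses the default pager (PG_DFLT); a character special vnode
	 * uses the device pager (PG_DEVICE) keyed by its device number;
	 * any other file is handled by the vnode pager (PG_VNODE).
	 */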
	if ((flags & MAP_TYPE) == MAP_ANON)
		type = PG_DFLT;
	else {
		vp = (struct vnode *)handle;
		if (vp->v_type == VCHR) {
			type = PG_DEVICE;
			handle = (caddr_t)vp->v_rdev;
		} else
			type = PG_VNODE;
	}
	pager = vm_pager_allocate(type, handle, size, prot);
	if (pager == VM_PAGER_NULL)
		return (type == PG_DEVICE ? EINVAL : ENOMEM);
	/*
	 * Find object and release extra reference gained by lookup
	 */
	object = vm_object_lookup(pager);
	vm_object_deallocate(object);

	/*
	 * Anonymous memory.
	 */
	if ((flags & MAP_TYPE) == MAP_ANON) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, TRUE);
		if (rv != KERN_SUCCESS) {
			if (handle == NULL)
				vm_pager_deallocate(pager);
			else
				vm_object_deallocate(object);
			goto out;
		}
		/*
		 * Don't cache anonymous objects.
		 * Loses the reference gained by vm_pager_allocate.
		 */
		(void) pager_cache(object, FALSE);
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
			       u.u_procp->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Must be type MAP_FILE.
	 * Distinguish between character special and regular files.
	 */
	else if (vp->v_type == VCHR) {
		rv = vm_allocate_with_pager(map, addr, size, fitit,
					    pager, (vm_offset_t)foff, FALSE);
		/*
		 * Uncache the object and lose the reference gained
		 * by vm_pager_allocate().  If the call to
		 * vm_allocate_with_pager() was successful, then we
		 * gained an additional reference ensuring the object
		 * will continue to exist.  If the call failed then
		 * the deallocate call below will terminate the
		 * object, which is fine.
		 */
		(void) pager_cache(object, FALSE);
		if (rv != KERN_SUCCESS)
			goto out;
	}
	/*
	 * A regular file
	 */
	else {
#ifdef DEBUG
		if (object == VM_OBJECT_NULL)
			printf("vm_mmap: no object: vp %x, pager %x\n",
			       vp, pager);
#endif
		/*
		 * Map it directly.
		 * Allows modifications to go out to the vnode.
		 */
		if (flags & MAP_SHARED) {
			rv = vm_allocate_with_pager(map, addr, size,
						    fitit, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			/*
			 * Don't cache the object.  This is the easiest way
			 * of ensuring that data gets back to the filesystem
			 * because vnode_pager_deallocate() will fsync the
			 * vnode.  pager_cache() will lose the extra ref.
			 */
			if (prot & VM_PROT_WRITE)
				pager_cache(object, FALSE);
			else
				vm_object_deallocate(object);
		}
		/*
		 * Copy-on-write of file.  Two flavors.
		 * MAP_COPY is true COW, you essentially get a snapshot of
		 * the region at the time of mapping.  MAP_PRIVATE means only
		 * that your changes are not reflected back to the object.
		 * Changes made by others will be seen.
		 */
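		/*
		 * For example, if another process writes the file after
		 * this mapping is made, a MAP_PRIVATE mapper will see the
		 * new data in pages it has not itself modified, while a
		 * MAP_COPY mapper keeps seeing its snapshot from mmap
		 * time.  In neither case do the mapper's own stores reach
		 * the file.
		 */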
		else {
			vm_map_t tmap;
			vm_offset_t off;

			/* locate and allocate the target address space */
			rv = vm_map_find(map, VM_OBJECT_NULL, (vm_offset_t)0,
					 addr, size, fitit);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				goto out;
			}
			tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
					     VM_MIN_ADDRESS+size, TRUE);
			off = VM_MIN_ADDRESS;
			rv = vm_allocate_with_pager(tmap, &off, size,
						    TRUE, pager,
						    (vm_offset_t)foff, FALSE);
			if (rv != KERN_SUCCESS) {
				vm_object_deallocate(object);
				vm_map_deallocate(tmap);
				goto out;
			}
			/*
			 * (XXX)
			 * MAP_PRIVATE implies that we see changes made by
			 * others.  To ensure that, we need to guarantee that
			 * no copy object is created (otherwise original
			 * pages would be pushed to the copy object and we
			 * would never see changes made by others).  We
			 * totally sleaze it right now by marking the object
			 * internal temporarily.
			 */
			if ((flags & MAP_COPY) == 0)
				object->internal = TRUE;
			rv = vm_map_copy(map, tmap, *addr, size, off,
					 FALSE, FALSE);
			object->internal = FALSE;
			/*
			 * (XXX)
			 * My oh my, this only gets worse...
			 * Force creation of a shadow object so that
			 * vm_map_fork will do the right thing.
			 */
			if ((flags & MAP_COPY) == 0) {
				vm_map_t tmap;
				vm_map_entry_t tentry;
				vm_object_t tobject;
				vm_offset_t toffset;
				vm_prot_t tprot;
				boolean_t twired, tsu;

				tmap = map;
				vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
					      &tentry, &tobject, &toffset,
					      &tprot, &twired, &tsu);
				vm_map_lookup_done(tmap, tentry);
			}
			/*
			 * (XXX)
			 * Map copy code cannot detect sharing unless a
			 * sharing map is involved.  So we cheat and write
			 * protect everything ourselves.  Note we cannot
			 * use vm_object_pmap_copy() because that relies
			 * on the page copy_on_write bit which isn't
			 * always accurate with shared objects.
			 */
			vm_object_pmap_force_copy(object, (vm_offset_t)foff,
					    (vm_offset_t)foff+size);
			vm_object_deallocate(object);
			vm_map_deallocate(tmap);
			if (rv != KERN_SUCCESS)
				goto out;
		}
#ifdef DEBUG
		if (mmapdebug & MDB_MAPIT)
			printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
			       u.u_procp->p_pid, *addr, size, pager);
#endif
	}
	/*
	 * Correct protection (default is VM_PROT_ALL).
	 * Note that we set the maximum protection.  This may not be
	 * entirely correct.  Maybe the maximum protection should be based
	 * on the object permissions where it makes sense (e.g. a vnode).
	 *
	 * Changed my mind: leave max prot at VM_PROT_ALL.
	 */
	if (prot != VM_PROT_ALL) {
		rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & MAP_SHARED) {
		rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_deallocate(map, *addr, size);
			goto out;
		}
	}
out:
#ifdef DEBUG
	if (mmapdebug & MDB_MAPIT)
		printf("vm_mmap: rv %d\n", rv);
#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

/*
 * Internal bastardized version of Mach's vm_region system call.
 * Given an address and size it returns map attributes as well
 * as the (locked) object mapped at that location.
 */
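/*
 * The object is returned locked; the caller is responsible for
 * vm_object_unlock()ing it (as msync() above does).  Note that *addr
 * may be advanced to the start of the next allocated entry and *size
 * covers only the remainder of that entry.
 */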
vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff)
	vm_map_t	map;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	*size;		/* OUT */
	vm_prot_t	*prot;		/* OUT */
	vm_prot_t	*max_prot;	/* OUT */
	vm_inherit_t	*inheritance;	/* OUT */
	boolean_t	*shared;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*objoff;	/* OUT */
{
	vm_map_entry_t	tmp_entry;
	register
	vm_map_entry_t	entry;
	register
	vm_offset_t	tmp_offset;
	vm_offset_t	start;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	start = *addr;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->next) == &map->header) {
			vm_map_unlock_read(map);
			return(KERN_NO_SPACE);
		}
		start = entry->start;
		*addr = start;
	} else
		entry = tmp_entry;

	*prot = entry->protection;
	*max_prot = entry->max_protection;
	*inheritance = entry->inheritance;

	tmp_offset = entry->offset + (start - entry->start);
	*size = (entry->end - start);

	if (entry->is_a_map) {
		register vm_map_t share_map;
		vm_size_t share_size;

		share_map = entry->object.share_map;

		vm_map_lock_read(share_map);
		(void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry);

		if ((share_size = (tmp_entry->end - tmp_offset)) < *size)
			*size = share_size;

		vm_object_lock(tmp_entry->object);
		*object = tmp_entry->object.vm_object;
		*objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start);

		*shared = (share_map->ref_count != 1);
		vm_map_unlock_read(share_map);
	} else {
		vm_object_lock(entry->object);
		*object = entry->object.vm_object;
		*objoff = tmp_offset;

		*shared = FALSE;
	}

	vm_map_unlock_read(map);

	return(KERN_SUCCESS);
}

/*
 * Yet another bastard routine.
 */
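/*
 * Summarizing the body below: find (or create and cache) the VM object
 * backed by "pager", mark it internal or not as requested, and map it
 * into "map" at *addr (truncated to a page boundary, size rounded up).
 * On success the pager is attached to the object; on failure the object
 * reference is dropped.
 */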
vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
	register vm_map_t	map;
	register vm_offset_t	*addr;
	register vm_size_t	size;
	boolean_t		fitit;
	vm_pager_t		pager;
	vm_offset_t		poffset;
	boolean_t		internal;
{
	register vm_object_t	object;
	register int		result;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	vm_stat.lookups++;
	if (object == VM_OBJECT_NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
	} else
		vm_stat.hits++;
	object->internal = internal;

	result = vm_map_find(map, object, poffset, addr, size, fitit);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != vm_pager_null)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}

/*
 * XXX: this routine belongs in vm_map.c.
 *
 * Returns TRUE if the range [start, end) is allocated in either
 * a single entry (single_entry == TRUE) or multiple contiguous
 * entries (single_entry == FALSE).
 *
 * start and end should be page aligned.
 */
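/*
 * For example, given a (hypothetical) map with adjacent entries
 * covering [A,B) and [B,C): a query for [A,C) succeeds only when
 * single_entry is FALSE, while a query for [A,B) succeeds either way.
 */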
boolean_t
vm_map_is_allocated(map, start, end, single_entry)
	vm_map_t map;
	vm_offset_t start, end;
	boolean_t single_entry;
{
	vm_map_entry_t mapent;
	register vm_offset_t nend;

	vm_map_lock_read(map);

	/*
	 * Start address not in any entry
	 */
	if (!vm_map_lookup_entry(map, start, &mapent)) {
		vm_map_unlock_read(map);
		return (FALSE);
	}
	/*
	 * Find the maximum stretch of contiguously allocated space
	 */
	nend = mapent->end;
	if (!single_entry) {
		mapent = mapent->next;
		while (mapent != &map->header && mapent->start == nend) {
			nend = mapent->end;
			mapent = mapent->next;
		}
	}

	vm_map_unlock_read(map);
	return (end <= nend);
}

#include "../vm/vm_page.h"

/*
 * Doesn't trust the COW bit in the page structure.
 * vm_fault can improperly set it.
 */
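/*
 * Write-protects every resident page of "object" whose offset falls in
 * [start, end) and marks it copy_on_write, presumably so that later
 * stores through existing mappings take the copy-on-write fault path.
 */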
void
vm_object_pmap_force_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start <= p->offset && p->offset < end) {
			pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}