1 /*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * %sccs.include.redist.c%
9 *
10 * @(#)vm_map.c 8.9 (Berkeley) 05/17/95
11 *
12 *
13 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14 * All rights reserved.
15 *
16 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17 *
18 * Permission to use, copy, modify and distribute this software and
19 * its documentation is hereby granted, provided that both the copyright
20 * notice and this permission notice appear in all copies of the
21 * software, derivative works or modified versions, and any portions
22 * thereof, and that both notices appear in supporting documentation.
23 *
24 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27 *
28 * Carnegie Mellon requests users of this software to return to
29 *
30 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
31 * School of Computer Science
32 * Carnegie Mellon University
33 * Pittsburgh PA 15213-3890
34 *
35 * any improvements or extensions that they make and grant Carnegie the
36 * rights to redistribute these changes.
37 */
38
39 /*
40 * Virtual memory mapping module.
41 */
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46
47 #include <vm/vm.h>
48 #include <vm/vm_page.h>
49
50 /*
51 * Virtual memory maps provide for the mapping, protection,
52 * and sharing of virtual memory objects. In addition,
53 * this module provides for an efficient virtual copy of
54 * memory from one map to another.
55 *
56 * Synchronization is required prior to most operations.
57 *
58 * Maps consist of an ordered doubly-linked list of simple
59 * entries; a single hint is used to speed up lookups.
60 *
61 * In order to properly represent the sharing of virtual
62 * memory regions among maps, the map structure is bi-level.
63 * Top-level ("address") maps refer to regions of sharable
64 * virtual memory. These regions are implemented as
65 * ("sharing") maps, which then refer to the actual virtual
66 * memory objects. When two address maps "share" memory,
67 * their top-level maps both have references to the same
68 * sharing map. When memory is virtual-copied from one
69 * address map to another, the references in the sharing
70 * maps are actually copied -- no copying occurs at the
71 * virtual memory object level.
72 *
 * Since portions of maps are specified by start/end addresses,
74 * which may not align with existing map entries, all
75 * routines merely "clip" entries to these start/end values.
76 * [That is, an entry is split into two, bordering at a
77 * start or end value.] Note that these clippings may not
78 * always be necessary (as the two resulting entries are then
79 * not changed); however, the clipping is done for convenience.
80 * No attempt is currently made to "glue back together" two
81 * abutting entries.
82 *
83 * As mentioned above, virtual copy operations are performed
84 * by copying VM object references from one sharing map to
85 * another, and then marking both regions as copy-on-write.
86 * It is important to note that only one writeable reference
87 * to a VM object region exists in any map -- this means that
88 * shadow object creation can be delayed until a write operation
89 * occurs.
90 */
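
/*
 * An illustrative sketch of the clipping described above (addresses are
 * hypothetical, not taken from any caller):  suppose an entry covers
 * [0x1000, 0x4000) and an operation such as vm_map_protect is applied
 * to [0x2000, 0x3000) only.  vm_map_clip_start splits the entry into
 * [0x1000, 0x2000) and [0x2000, 0x4000); vm_map_clip_end then splits
 * the latter into [0x2000, 0x3000) and [0x3000, 0x4000).  Only the
 * middle entry is modified, and the abutting pieces are not rejoined
 * afterwards.
 */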
91
92 /*
93 * vm_map_startup:
94 *
95 * Initialize the vm_map module. Must be called before
96 * any other vm_map routines.
97 *
98 * Map and entry structures are allocated from the general
99 * purpose memory pool with some exceptions:
100 *
101 * - The kernel map and kmem submap are allocated statically.
102 * - Kernel map entries are allocated out of a static pool.
103 *
104 * These restrictions are necessary since malloc() uses the
105 * maps and requires map entries.
106 */
107
108 vm_offset_t kentry_data;
109 vm_size_t kentry_data_size;
110 vm_map_entry_t kentry_free;
111 vm_map_t kmap_free;
112
113 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
114 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
115
116 void
vm_map_startup()
118 {
119 register int i;
120 register vm_map_entry_t mep;
121 vm_map_t mp;
122
123 /*
124 * Static map structures for allocation before initialization of
125 * kernel map or kmem map. vm_map_create knows how to deal with them.
126 */
127 kmap_free = mp = (vm_map_t) kentry_data;
128 i = MAX_KMAP;
129 while (--i > 0) {
130 mp->header.next = (vm_map_entry_t) (mp + 1);
131 mp++;
132 }
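	/* Terminate the free list at the last map structure and advance past it. */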
133 mp++->header.next = NULL;
134
135 /*
136 * Form a free list of statically allocated kernel map entries
137 * with the rest.
138 */
139 kentry_free = mep = (vm_map_entry_t) mp;
140 i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
141 while (--i > 0) {
142 mep->next = mep + 1;
143 mep++;
144 }
145 mep->next = NULL;
146 }
147
148 /*
149 * Allocate a vmspace structure, including a vm_map and pmap,
150 * and initialize those structures. The refcnt is set to 1.
151 * The remaining fields must be initialized by the caller.
152 */
153 struct vmspace *
vmspace_alloc(min, max, pageable)
155 vm_offset_t min, max;
156 int pageable;
157 {
158 register struct vmspace *vm;
159
160 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
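	/*
	 * Zero only the fields that precede vm_startcopy; the fields from
	 * vm_startcopy onward are left to the caller (vmspace_fork, for
	 * one, copies them from the parent).
	 */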
161 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
162 vm_map_init(&vm->vm_map, min, max, pageable);
163 pmap_pinit(&vm->vm_pmap);
164 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
165 vm->vm_refcnt = 1;
166 return (vm);
167 }
168
169 void
vmspace_free(vm)
171 register struct vmspace *vm;
172 {
173
174 if (--vm->vm_refcnt == 0) {
175 /*
176 * Lock the map, to wait out all other references to it.
177 * Delete all of the mappings and pages they hold,
178 * then call the pmap module to reclaim anything left.
179 */
180 vm_map_lock(&vm->vm_map);
181 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
182 vm->vm_map.max_offset);
183 pmap_release(&vm->vm_pmap);
184 FREE(vm, M_VMMAP);
185 }
186 }
187
188 /*
189 * vm_map_create:
190 *
191 * Creates and returns a new empty VM map with
192 * the given physical map structure, and having
193 * the given lower and upper address bounds.
194 */
195 vm_map_t
vm_map_create(pmap, min, max, pageable)
197 pmap_t pmap;
198 vm_offset_t min, max;
199 boolean_t pageable;
200 {
201 register vm_map_t result;
202 extern vm_map_t kmem_map;
203
204 if (kmem_map == NULL) {
205 result = kmap_free;
206 if (result == NULL)
207 panic("vm_map_create: out of maps");
208 kmap_free = (vm_map_t) result->header.next;
209 } else
210 MALLOC(result, vm_map_t, sizeof(struct vm_map),
211 M_VMMAP, M_WAITOK);
212
213 vm_map_init(result, min, max, pageable);
214 result->pmap = pmap;
215 return(result);
216 }
217
218 /*
219 * Initialize an existing vm_map structure
220 * such as that in the vmspace structure.
221 * The pmap is set elsewhere.
222 */
223 void
vm_map_init(map, min, max, pageable)
225 register struct vm_map *map;
226 vm_offset_t min, max;
227 boolean_t pageable;
228 {
229 map->header.next = map->header.prev = &map->header;
230 map->nentries = 0;
231 map->size = 0;
232 map->ref_count = 1;
233 map->is_main_map = TRUE;
234 map->min_offset = min;
235 map->max_offset = max;
236 map->entries_pageable = pageable;
237 map->first_free = &map->header;
238 map->hint = &map->header;
239 map->timestamp = 0;
240 lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
241 simple_lock_init(&map->ref_lock);
242 simple_lock_init(&map->hint_lock);
243 }
244
245 /*
246 * vm_map_entry_create: [ internal use only ]
247 *
248 * Allocates a VM map entry for insertion.
 * No entry fields are filled in; this routine is used only within this module.
250 */
251 vm_map_entry_t
vm_map_entry_create(map)
253 vm_map_t map;
254 {
255 vm_map_entry_t entry;
256 #ifdef DEBUG
257 extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
258 boolean_t isspecial;
259
260 isspecial = (map == kernel_map || map == kmem_map ||
261 map == mb_map || map == pager_map);
	if ((isspecial && map->entries_pageable) ||
	    (!isspecial && !map->entries_pageable))
		panic("vm_map_entry_create: bogus map");
265 #endif
266 if (map->entries_pageable) {
267 MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
268 M_VMMAPENT, M_WAITOK);
269 } else {
270 if (entry = kentry_free)
271 kentry_free = kentry_free->next;
272 }
273 if (entry == NULL)
274 panic("vm_map_entry_create: out of map entries");
275
276 return(entry);
277 }
278
279 /*
280 * vm_map_entry_dispose: [ internal use only ]
281 *
282 * Inverse of vm_map_entry_create.
283 */
284 void
vm_map_entry_dispose(map, entry)
286 vm_map_t map;
287 vm_map_entry_t entry;
288 {
289 #ifdef DEBUG
290 extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
291 boolean_t isspecial;
292
293 isspecial = (map == kernel_map || map == kmem_map ||
294 map == mb_map || map == pager_map);
	if ((isspecial && map->entries_pageable) ||
	    (!isspecial && !map->entries_pageable))
		panic("vm_map_entry_dispose: bogus map");
298 #endif
299 if (map->entries_pageable) {
300 FREE(entry, M_VMMAPENT);
301 } else {
302 entry->next = kentry_free;
303 kentry_free = entry;
304 }
305 }
306
307 /*
308 * vm_map_entry_{un,}link:
309 *
310 * Insert/remove entries from maps.
311 */
312 #define vm_map_entry_link(map, after_where, entry) \
313 { \
314 (map)->nentries++; \
315 (entry)->prev = (after_where); \
316 (entry)->next = (after_where)->next; \
317 (entry)->prev->next = (entry); \
318 (entry)->next->prev = (entry); \
319 }
320 #define vm_map_entry_unlink(map, entry) \
321 { \
322 (map)->nentries--; \
323 (entry)->next->prev = (entry)->prev; \
324 (entry)->prev->next = (entry)->next; \
325 }
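
/*
 * Entries are kept sorted by address on the doubly-linked list, with
 * &map->header serving as the list sentinel.
 */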
326
327 /*
328 * vm_map_reference:
329 *
330 * Creates another valid reference to the given map.
331 *
332 */
333 void
vm_map_reference(map)
335 register vm_map_t map;
336 {
337 if (map == NULL)
338 return;
339
340 simple_lock(&map->ref_lock);
341 #ifdef DEBUG
342 if (map->ref_count == 0)
343 panic("vm_map_reference: zero ref_count");
344 #endif
345 map->ref_count++;
346 simple_unlock(&map->ref_lock);
347 }
348
349 /*
350 * vm_map_deallocate:
351 *
352 * Removes a reference from the specified map,
353 * destroying it if no references remain.
354 * The map should not be locked.
355 */
356 void
vm_map_deallocate(map)
358 register vm_map_t map;
359 {
360
361 if (map == NULL)
362 return;
363
364 simple_lock(&map->ref_lock);
365 if (--map->ref_count > 0) {
366 simple_unlock(&map->ref_lock);
367 return;
368 }
369
370 /*
371 * Lock the map, to wait out all other references
372 * to it.
373 */
374
375 vm_map_lock_drain_interlock(map);
376
377 (void) vm_map_delete(map, map->min_offset, map->max_offset);
378
379 pmap_destroy(map->pmap);
380
381 vm_map_unlock(map);
382
383 FREE(map, M_VMMAP);
384 }
385
386 /*
387 * vm_map_insert:
388 *
389 * Inserts the given whole VM object into the target
390 * map at the specified address range. The object's
391 * size should match that of the address range.
392 *
393 * Requires that the map be locked, and leaves it so.
394 */
395 int
vm_map_insert(map, object, offset, start, end)
397 vm_map_t map;
398 vm_object_t object;
399 vm_offset_t offset;
400 vm_offset_t start;
401 vm_offset_t end;
402 {
403 register vm_map_entry_t new_entry;
404 register vm_map_entry_t prev_entry;
405 vm_map_entry_t temp_entry;
406
407 /*
408 * Check that the start and end points are not bogus.
409 */
410
411 if ((start < map->min_offset) || (end > map->max_offset) ||
412 (start >= end))
413 return(KERN_INVALID_ADDRESS);
414
415 /*
416 * Find the entry prior to the proposed
417 * starting address; if it's part of an
418 * existing entry, this range is bogus.
419 */
420
421 if (vm_map_lookup_entry(map, start, &temp_entry))
422 return(KERN_NO_SPACE);
423
424 prev_entry = temp_entry;
425
426 /*
427 * Assert that the next entry doesn't overlap the
428 * end point.
429 */
430
431 if ((prev_entry->next != &map->header) &&
432 (prev_entry->next->start < end))
433 return(KERN_NO_SPACE);
434
435 /*
436 * See if we can avoid creating a new entry by
437 * extending one of our neighbors.
438 */
439
440 if (object == NULL) {
441 if ((prev_entry != &map->header) &&
442 (prev_entry->end == start) &&
443 (map->is_main_map) &&
444 (prev_entry->is_a_map == FALSE) &&
445 (prev_entry->is_sub_map == FALSE) &&
446 (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
447 (prev_entry->protection == VM_PROT_DEFAULT) &&
448 (prev_entry->max_protection == VM_PROT_DEFAULT) &&
449 (prev_entry->wired_count == 0)) {
450
451 if (vm_object_coalesce(prev_entry->object.vm_object,
452 NULL,
453 prev_entry->offset,
454 (vm_offset_t) 0,
455 (vm_size_t)(prev_entry->end
456 - prev_entry->start),
457 (vm_size_t)(end - prev_entry->end))) {
458 /*
459 * Coalesced the two objects - can extend
460 * the previous map entry to include the
461 * new range.
462 */
463 map->size += (end - prev_entry->end);
464 prev_entry->end = end;
465 return(KERN_SUCCESS);
466 }
467 }
468 }
469
470 /*
471 * Create a new entry
472 */
473
474 new_entry = vm_map_entry_create(map);
475 new_entry->start = start;
476 new_entry->end = end;
477
478 new_entry->is_a_map = FALSE;
479 new_entry->is_sub_map = FALSE;
480 new_entry->object.vm_object = object;
481 new_entry->offset = offset;
482
483 new_entry->copy_on_write = FALSE;
484 new_entry->needs_copy = FALSE;
485
486 if (map->is_main_map) {
487 new_entry->inheritance = VM_INHERIT_DEFAULT;
488 new_entry->protection = VM_PROT_DEFAULT;
489 new_entry->max_protection = VM_PROT_DEFAULT;
490 new_entry->wired_count = 0;
491 }
492
493 /*
494 * Insert the new entry into the list
495 */
496
497 vm_map_entry_link(map, prev_entry, new_entry);
498 map->size += new_entry->end - new_entry->start;
499
500 /*
501 * Update the free space hint
502 */
503
504 if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
505 map->first_free = new_entry;
506
507 return(KERN_SUCCESS);
508 }
509
510 /*
511 * SAVE_HINT:
512 *
513 * Saves the specified entry as the hint for
514 * future lookups. Performs necessary interlocks.
515 */
516 #define SAVE_HINT(map,value) \
517 simple_lock(&(map)->hint_lock); \
518 (map)->hint = (value); \
519 simple_unlock(&(map)->hint_lock);
520
521 /*
522 * vm_map_lookup_entry: [ internal use only ]
523 *
524 * Finds the map entry containing (or
525 * immediately preceding) the specified address
526 * in the given map; the entry is returned
527 * in the "entry" parameter. The boolean
528 * result indicates whether the address is
529 * actually contained in the map.
530 */
531 boolean_t
vm_map_lookup_entry(map, address, entry)
533 register vm_map_t map;
534 register vm_offset_t address;
535 vm_map_entry_t *entry; /* OUT */
536 {
537 register vm_map_entry_t cur;
538 register vm_map_entry_t last;
539
540 /*
541 * Start looking either from the head of the
542 * list, or from the hint.
543 */
544
545 simple_lock(&map->hint_lock);
546 cur = map->hint;
547 simple_unlock(&map->hint_lock);
548
549 if (cur == &map->header)
550 cur = cur->next;
551
552 if (address >= cur->start) {
553 /*
554 * Go from hint to end of list.
555 *
556 * But first, make a quick check to see if
557 * we are already looking at the entry we
558 * want (which is usually the case).
559 * Note also that we don't need to save the hint
560 * here... it is the same hint (unless we are
561 * at the header, in which case the hint didn't
562 * buy us anything anyway).
563 */
564 last = &map->header;
565 if ((cur != last) && (cur->end > address)) {
566 *entry = cur;
567 return(TRUE);
568 }
569 }
570 else {
571 /*
572 * Go from start to hint, *inclusively*
573 */
574 last = cur->next;
575 cur = map->header.next;
576 }
577
578 /*
579 * Search linearly
580 */
581
582 while (cur != last) {
583 if (cur->end > address) {
584 if (address >= cur->start) {
585 /*
586 * Save this lookup for future
587 * hints, and return
588 */
589
590 *entry = cur;
591 SAVE_HINT(map, cur);
592 return(TRUE);
593 }
594 break;
595 }
596 cur = cur->next;
597 }
598 *entry = cur->prev;
599 SAVE_HINT(map, *entry);
600 return(FALSE);
601 }
602
603 /*
604 * Find sufficient space for `length' bytes in the given map, starting at
605 * `start'. The map must be locked. Returns 0 on success, 1 on no space.
606 */
607 int
vm_map_findspace(map, start, length, addr)
609 register vm_map_t map;
610 register vm_offset_t start;
611 vm_size_t length;
612 vm_offset_t *addr;
613 {
614 register vm_map_entry_t entry, next;
615 register vm_offset_t end;
616
617 if (start < map->min_offset)
618 start = map->min_offset;
619 if (start > map->max_offset)
620 return (1);
621
622 /*
623 * Look for the first possible address; if there's already
624 * something at this address, we have to start after it.
625 */
626 if (start == map->min_offset) {
627 if ((entry = map->first_free) != &map->header)
628 start = entry->end;
629 } else {
630 vm_map_entry_t tmp;
631 if (vm_map_lookup_entry(map, start, &tmp))
632 start = tmp->end;
633 entry = tmp;
634 }
635
636 /*
637 * Look through the rest of the map, trying to fit a new region in
638 * the gap between existing regions, or after the very last region.
639 */
640 for (;; start = (entry = next)->end) {
641 /*
642 * Find the end of the proposed new region. Be sure we didn't
643 * go beyond the end of the map, or wrap around the address;
644 * if so, we lose. Otherwise, if this is the last entry, or
645 * if the proposed new region fits before the next entry, we
646 * win.
647 */
648 end = start + length;
649 if (end > map->max_offset || end < start)
650 return (1);
651 next = entry->next;
652 if (next == &map->header || next->start >= end)
653 break;
654 }
655 SAVE_HINT(map, entry);
656 *addr = start;
657 return (0);
658 }
659
660 /*
661 * vm_map_find finds an unallocated region in the target address
662 * map with the given length. The search is defined to be
663 * first-fit from the specified address; the region found is
664 * returned in the same parameter.
665 *
666 */
667 int
vm_map_find(map, object, offset, addr, length, find_space)
669 vm_map_t map;
670 vm_object_t object;
671 vm_offset_t offset;
672 vm_offset_t *addr; /* IN/OUT */
673 vm_size_t length;
674 boolean_t find_space;
675 {
676 register vm_offset_t start;
677 int result;
678
679 start = *addr;
680 vm_map_lock(map);
681 if (find_space) {
682 if (vm_map_findspace(map, start, length, addr)) {
683 vm_map_unlock(map);
684 return (KERN_NO_SPACE);
685 }
686 start = *addr;
687 }
688 result = vm_map_insert(map, object, offset, start, start + length);
689 vm_map_unlock(map);
690 return (result);
691 }
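
/*
 * A typical call, sketched with hypothetical arguments (real callers
 * such as the kmem allocation routines differ in detail):
 *
 *	vm_offset_t addr = vm_map_min(map);
 *
 *	if (vm_map_find(map, object, (vm_offset_t)0, &addr, size, TRUE)
 *	    != KERN_SUCCESS)
 *		return (0);
 *
 * With find_space TRUE the map is searched first-fit starting at *addr;
 * with find_space FALSE the object is inserted at exactly *addr.
 */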
692
693 /*
694 * vm_map_simplify_entry: [ internal use only ]
695 *
696 * Simplify the given map entry by:
697 * removing extra sharing maps
698 * [XXX maybe later] merging with a neighbor
699 */
700 void
vm_map_simplify_entry(map, entry)
702 vm_map_t map;
703 vm_map_entry_t entry;
704 {
705 #ifdef lint
706 map++;
707 #endif
708
709 /*
710 * If this entry corresponds to a sharing map, then
711 * see if we can remove the level of indirection.
712 * If it's not a sharing map, then it points to
713 * a VM object, so see if we can merge with either
714 * of our neighbors.
715 */
716
717 if (entry->is_sub_map)
718 return;
719 if (entry->is_a_map) {
720 #if 0
721 vm_map_t my_share_map;
722 int count;
723
724 my_share_map = entry->object.share_map;
725 simple_lock(&my_share_map->ref_lock);
726 count = my_share_map->ref_count;
727 simple_unlock(&my_share_map->ref_lock);
728
729 if (count == 1) {
730 /* Can move the region from
731 * entry->start to entry->end (+ entry->offset)
732 * in my_share_map into place of entry.
733 * Later.
734 */
735 }
736 #endif
737 }
738 else {
739 /*
740 * Try to merge with our neighbors.
741 *
742 * Conditions for merge are:
743 *
744 * 1. entries are adjacent.
745 * 2. both entries point to objects
746 * with null pagers.
747 *
748 * If a merge is possible, we replace the two
749 * entries with a single entry, then merge
750 * the two objects into a single object.
751 *
752 * Now, all that is left to do is write the
753 * code!
754 */
755 }
756 }
757
758 /*
759 * vm_map_clip_start: [ internal use only ]
760 *
761 * Asserts that the given entry begins at or after
762 * the specified address; if necessary,
763 * it splits the entry into two.
764 */
765 #define vm_map_clip_start(map, entry, startaddr) \
766 { \
767 if (startaddr > entry->start) \
768 _vm_map_clip_start(map, entry, startaddr); \
769 }
770
771 /*
772 * This routine is called only when it is known that
773 * the entry must be split.
774 */
775 static void
_vm_map_clip_start(map, entry, start)
777 register vm_map_t map;
778 register vm_map_entry_t entry;
779 register vm_offset_t start;
780 {
781 register vm_map_entry_t new_entry;
782
783 /*
784 * See if we can simplify this entry first
785 */
786
787 vm_map_simplify_entry(map, entry);
788
789 /*
790 * Split off the front portion --
791 * note that we must insert the new
792 * entry BEFORE this one, so that
793 * this entry has the specified starting
794 * address.
795 */
796
797 new_entry = vm_map_entry_create(map);
798 *new_entry = *entry;
799
800 new_entry->end = start;
801 entry->offset += (start - entry->start);
802 entry->start = start;
803
804 vm_map_entry_link(map, entry->prev, new_entry);
805
806 if (entry->is_a_map || entry->is_sub_map)
807 vm_map_reference(new_entry->object.share_map);
808 else
809 vm_object_reference(new_entry->object.vm_object);
810 }
811
812 /*
813 * vm_map_clip_end: [ internal use only ]
814 *
815 * Asserts that the given entry ends at or before
816 * the specified address; if necessary,
817 * it splits the entry into two.
818 */
819
820 #define vm_map_clip_end(map, entry, endaddr) \
821 { \
822 if (endaddr < entry->end) \
823 _vm_map_clip_end(map, entry, endaddr); \
824 }
825
826 /*
827 * This routine is called only when it is known that
828 * the entry must be split.
829 */
830 static void
_vm_map_clip_end(map, entry, end)
832 register vm_map_t map;
833 register vm_map_entry_t entry;
834 register vm_offset_t end;
835 {
836 register vm_map_entry_t new_entry;
837
838 /*
839 * Create a new entry and insert it
840 * AFTER the specified entry
841 */
842
843 new_entry = vm_map_entry_create(map);
844 *new_entry = *entry;
845
846 new_entry->start = entry->end = end;
847 new_entry->offset += (end - entry->start);
848
849 vm_map_entry_link(map, entry, new_entry);
850
851 if (entry->is_a_map || entry->is_sub_map)
852 vm_map_reference(new_entry->object.share_map);
853 else
854 vm_object_reference(new_entry->object.vm_object);
855 }
856
857 /*
858 * VM_MAP_RANGE_CHECK: [ internal use only ]
859 *
860 * Asserts that the starting and ending region
861 * addresses fall within the valid range of the map.
862 */
863 #define VM_MAP_RANGE_CHECK(map, start, end) \
864 { \
865 if (start < vm_map_min(map)) \
866 start = vm_map_min(map); \
867 if (end > vm_map_max(map)) \
868 end = vm_map_max(map); \
869 if (start > end) \
870 start = end; \
871 }
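
/*
 * Note that VM_MAP_RANGE_CHECK silently truncates an out-of-range
 * request to the valid portion of the map rather than rejecting it.
 */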
872
873 /*
874 * vm_map_submap: [ kernel use only ]
875 *
876 * Mark the given range as handled by a subordinate map.
877 *
878 * This range must have been created with vm_map_find,
879 * and no other operations may have been performed on this
880 * range prior to calling vm_map_submap.
881 *
882 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
884 * vm_fault
885 * [Don't try vm_map_copy!]
886 *
887 * To remove a submapping, one must first remove the
888 * range from the superior map, and then destroy the
889 * submap (if desired). [Better yet, don't try it.]
890 */
891 int
vm_map_submap(map, start, end, submap)
893 register vm_map_t map;
894 register vm_offset_t start;
895 register vm_offset_t end;
896 vm_map_t submap;
897 {
898 vm_map_entry_t entry;
899 register int result = KERN_INVALID_ARGUMENT;
900
901 vm_map_lock(map);
902
903 VM_MAP_RANGE_CHECK(map, start, end);
904
905 if (vm_map_lookup_entry(map, start, &entry)) {
906 vm_map_clip_start(map, entry, start);
907 }
908 else
909 entry = entry->next;
910
911 vm_map_clip_end(map, entry, end);
912
913 if ((entry->start == start) && (entry->end == end) &&
914 (!entry->is_a_map) &&
915 (entry->object.vm_object == NULL) &&
916 (!entry->copy_on_write)) {
917 entry->is_a_map = FALSE;
918 entry->is_sub_map = TRUE;
919 vm_map_reference(entry->object.sub_map = submap);
920 result = KERN_SUCCESS;
921 }
922 vm_map_unlock(map);
923
924 return(result);
925 }
926
927 /*
928 * vm_map_protect:
929 *
930 * Sets the protection of the specified address
931 * region in the target map. If "set_max" is
932 * specified, the maximum protection is to be set;
933 * otherwise, only the current protection is affected.
934 */
935 int
vm_map_protect(map, start, end, new_prot, set_max)
937 register vm_map_t map;
938 register vm_offset_t start;
939 register vm_offset_t end;
940 register vm_prot_t new_prot;
941 register boolean_t set_max;
942 {
943 register vm_map_entry_t current;
944 vm_map_entry_t entry;
945
946 vm_map_lock(map);
947
948 VM_MAP_RANGE_CHECK(map, start, end);
949
950 if (vm_map_lookup_entry(map, start, &entry)) {
951 vm_map_clip_start(map, entry, start);
952 }
953 else
954 entry = entry->next;
955
956 /*
957 * Make a first pass to check for protection
958 * violations.
959 */
960
961 current = entry;
962 while ((current != &map->header) && (current->start < end)) {
		if (current->is_sub_map) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
965 if ((new_prot & current->max_protection) != new_prot) {
966 vm_map_unlock(map);
967 return(KERN_PROTECTION_FAILURE);
968 }
969
970 current = current->next;
971 }
972
973 /*
974 * Go back and fix up protections.
975 * [Note that clipping is not necessary the second time.]
976 */
977
978 current = entry;
979
980 while ((current != &map->header) && (current->start < end)) {
981 vm_prot_t old_prot;
982
983 vm_map_clip_end(map, current, end);
984
985 old_prot = current->protection;
986 if (set_max)
987 current->protection =
988 (current->max_protection = new_prot) &
989 old_prot;
990 else
991 current->protection = new_prot;
992
993 /*
994 * Update physical map if necessary.
995 * Worry about copy-on-write here -- CHECK THIS XXX
996 */
997
998 if (current->protection != old_prot) {
999
1000 #define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
1001 VM_PROT_ALL)
1002 #define max(a,b) ((a) > (b) ? (a) : (b))
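			/*
			 * MASK strips VM_PROT_WRITE from copy-on-write
			 * entries so that the pmap never grants write
			 * access to pages that have not yet been copied.
			 */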
1003
1004 if (current->is_a_map) {
1005 vm_map_entry_t share_entry;
1006 vm_offset_t share_end;
1007
1008 vm_map_lock(current->object.share_map);
1009 (void) vm_map_lookup_entry(
1010 current->object.share_map,
1011 current->offset,
1012 &share_entry);
1013 share_end = current->offset +
1014 (current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
				       (share_entry->start < share_end)) {
1018
1019 pmap_protect(map->pmap,
1020 (max(share_entry->start,
1021 current->offset) -
1022 current->offset +
1023 current->start),
1024 min(share_entry->end,
1025 share_end) -
1026 current->offset +
1027 current->start,
1028 current->protection &
1029 MASK(share_entry));
1030
1031 share_entry = share_entry->next;
1032 }
1033 vm_map_unlock(current->object.share_map);
1034 }
1035 else
1036 pmap_protect(map->pmap, current->start,
1037 current->end,
					current->protection & MASK(current));
1039 #undef max
1040 #undef MASK
1041 }
1042 current = current->next;
1043 }
1044
1045 vm_map_unlock(map);
1046 return(KERN_SUCCESS);
1047 }
1048
1049 /*
1050 * vm_map_inherit:
1051 *
1052 * Sets the inheritance of the specified address
1053 * range in the target map. Inheritance
1054 * affects how the map will be shared with
1055 * child maps at the time of vm_map_fork.
1056 */
1057 int
vm_map_inherit(map, start, end, new_inheritance)
1059 register vm_map_t map;
1060 register vm_offset_t start;
1061 register vm_offset_t end;
1062 register vm_inherit_t new_inheritance;
1063 {
1064 register vm_map_entry_t entry;
1065 vm_map_entry_t temp_entry;
1066
1067 switch (new_inheritance) {
1068 case VM_INHERIT_NONE:
1069 case VM_INHERIT_COPY:
1070 case VM_INHERIT_SHARE:
1071 break;
1072 default:
1073 return(KERN_INVALID_ARGUMENT);
1074 }
1075
1076 vm_map_lock(map);
1077
1078 VM_MAP_RANGE_CHECK(map, start, end);
1079
1080 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1081 entry = temp_entry;
1082 vm_map_clip_start(map, entry, start);
1083 }
1084 else
1085 entry = temp_entry->next;
1086
1087 while ((entry != &map->header) && (entry->start < end)) {
1088 vm_map_clip_end(map, entry, end);
1089
1090 entry->inheritance = new_inheritance;
1091
1092 entry = entry->next;
1093 }
1094
1095 vm_map_unlock(map);
1096 return(KERN_SUCCESS);
1097 }
1098
1099 /*
1100 * vm_map_pageable:
1101 *
1102 * Sets the pageability of the specified address
1103 * range in the target map. Regions specified
1104 * as not pageable require locked-down physical
1105 * memory and physical page maps.
1106 *
1107 * The map must not be locked, but a reference
1108 * must remain to the map throughout the call.
1109 */
1110 int
vm_map_pageable(map, start, end, new_pageable)
1112 register vm_map_t map;
1113 register vm_offset_t start;
1114 register vm_offset_t end;
1115 register boolean_t new_pageable;
1116 {
1117 register vm_map_entry_t entry;
1118 vm_map_entry_t start_entry;
1119 register vm_offset_t failed;
1120 int rv;
1121
1122 vm_map_lock(map);
1123
1124 VM_MAP_RANGE_CHECK(map, start, end);
1125
1126 /*
1127 * Only one pageability change may take place at one
1128 * time, since vm_fault assumes it will be called
1129 * only once for each wiring/unwiring. Therefore, we
1130 * have to make sure we're actually changing the pageability
1131 * for the entire region. We do so before making any changes.
1132 */
1133
1134 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
1135 vm_map_unlock(map);
1136 return(KERN_INVALID_ADDRESS);
1137 }
1138 entry = start_entry;
1139
1140 /*
1141 * Actions are rather different for wiring and unwiring,
1142 * so we have two separate cases.
1143 */
1144
1145 if (new_pageable) {
1146
1147 vm_map_clip_start(map, entry, start);
1148
1149 /*
1150 * Unwiring. First ensure that the range to be
1151 * unwired is really wired down and that there
1152 * are no holes.
1153 */
1154 while ((entry != &map->header) && (entry->start < end)) {
1155
1156 if (entry->wired_count == 0 ||
1157 (entry->end < end &&
1158 (entry->next == &map->header ||
1159 entry->next->start > entry->end))) {
1160 vm_map_unlock(map);
1161 return(KERN_INVALID_ARGUMENT);
1162 }
1163 entry = entry->next;
1164 }
1165
1166 /*
1167 * Now decrement the wiring count for each region.
1168 * If a region becomes completely unwired,
1169 * unwire its physical pages and mappings.
1170 */
1171 vm_map_set_recursive(&map->lock);
1172
1173 entry = start_entry;
1174 while ((entry != &map->header) && (entry->start < end)) {
1175 vm_map_clip_end(map, entry, end);
1176
1177 entry->wired_count--;
1178 if (entry->wired_count == 0)
1179 vm_fault_unwire(map, entry->start, entry->end);
1180
1181 entry = entry->next;
1182 }
1183 vm_map_clear_recursive(&map->lock);
1184 }
1185
1186 else {
1187 /*
1188 * Wiring. We must do this in two passes:
1189 *
1190 * 1. Holding the write lock, we create any shadow
1191 * or zero-fill objects that need to be created.
1192 * Then we clip each map entry to the region to be
1193 * wired and increment its wiring count. We
1194 * create objects before clipping the map entries
1195 * to avoid object proliferation.
1196 *
1197 * 2. We downgrade to a read lock, and call
1198 * vm_fault_wire to fault in the pages for any
1199 * newly wired area (wired_count is 1).
1200 *
1201 * Downgrading to a read lock for vm_fault_wire avoids
1202 * a possible deadlock with another thread that may have
1203 * faulted on one of the pages to be wired (it would mark
1204 * the page busy, blocking us, then in turn block on the
1205 * map lock that we hold). Because of problems in the
1206 * recursive lock package, we cannot upgrade to a write
1207 * lock in vm_map_lookup. Thus, any actions that require
1208 * the write lock must be done beforehand. Because we
1209 * keep the read lock on the map, the copy-on-write status
1210 * of the entries we modify here cannot change.
1211 */
1212
1213 /*
1214 * Pass 1.
1215 */
1216 while ((entry != &map->header) && (entry->start < end)) {
1217 if (entry->wired_count == 0) {
1218
1219 /*
1220 * Perform actions of vm_map_lookup that need
1221 * the write lock on the map: create a shadow
1222 * object for a copy-on-write region, or an
1223 * object for a zero-fill region.
1224 *
1225 * We don't have to do this for entries that
1226 * point to sharing maps, because we won't hold
1227 * the lock on the sharing map.
1228 */
1229 if (!entry->is_a_map) {
1230 if (entry->needs_copy &&
1231 ((entry->protection & VM_PROT_WRITE) != 0)) {
1232
1233 vm_object_shadow(&entry->object.vm_object,
1234 &entry->offset,
1235 (vm_size_t)(entry->end
1236 - entry->start));
1237 entry->needs_copy = FALSE;
1238 }
1239 else if (entry->object.vm_object == NULL) {
1240 entry->object.vm_object =
1241 vm_object_allocate((vm_size_t)(entry->end
1242 - entry->start));
1243 entry->offset = (vm_offset_t)0;
1244 }
1245 }
1246 }
1247 vm_map_clip_start(map, entry, start);
1248 vm_map_clip_end(map, entry, end);
1249 entry->wired_count++;
1250
1251 /*
1252 * Check for holes
1253 */
1254 if (entry->end < end &&
1255 (entry->next == &map->header ||
1256 entry->next->start > entry->end)) {
1257 /*
1258 * Found one. Object creation actions
1259 * do not need to be undone, but the
1260 * wired counts need to be restored.
1261 */
1262 while (entry != &map->header && entry->end > start) {
1263 entry->wired_count--;
1264 entry = entry->prev;
1265 }
1266 vm_map_unlock(map);
1267 return(KERN_INVALID_ARGUMENT);
1268 }
1269 entry = entry->next;
1270 }
1271
1272 /*
1273 * Pass 2.
1274 */
1275
1276 /*
1277 * HACK HACK HACK HACK
1278 *
1279 * If we are wiring in the kernel map or a submap of it,
1280 * unlock the map to avoid deadlocks. We trust that the
1281 * kernel threads are well-behaved, and therefore will
1282 * not do anything destructive to this region of the map
1283 * while we have it unlocked. We cannot trust user threads
1284 * to do the same.
1285 *
1286 * HACK HACK HACK HACK
1287 */
1288 if (vm_map_pmap(map) == kernel_pmap) {
1289 vm_map_unlock(map); /* trust me ... */
1290 }
1291 else {
1292 vm_map_set_recursive(&map->lock);
1293 lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
1294 }
1295
1296 rv = 0;
1297 entry = start_entry;
1298 while (entry != &map->header && entry->start < end) {
1299 /*
1300 * If vm_fault_wire fails for any page we need to
1301 * undo what has been done. We decrement the wiring
1302 * count for those pages which have not yet been
1303 * wired (now) and unwire those that have (later).
1304 *
1305 * XXX this violates the locking protocol on the map,
1306 * needs to be fixed.
1307 */
1308 if (rv)
1309 entry->wired_count--;
1310 else if (entry->wired_count == 1) {
1311 rv = vm_fault_wire(map, entry->start, entry->end);
1312 if (rv) {
1313 failed = entry->start;
1314 entry->wired_count--;
1315 }
1316 }
1317 entry = entry->next;
1318 }
1319
1320 if (vm_map_pmap(map) == kernel_pmap) {
1321 vm_map_lock(map);
1322 }
1323 else {
1324 vm_map_clear_recursive(&map->lock);
1325 }
1326 if (rv) {
1327 vm_map_unlock(map);
1328 (void) vm_map_pageable(map, start, failed, TRUE);
1329 return(rv);
1330 }
1331 }
1332
1333 vm_map_unlock(map);
1334
1335 return(KERN_SUCCESS);
1336 }
1337
1338 /*
1339 * vm_map_clean
1340 *
1341 * Push any dirty cached pages in the address range to their pager.
1342 * If syncio is TRUE, dirty pages are written synchronously.
1343 * If invalidate is TRUE, any cached pages are freed as well.
1344 *
1345 * Returns an error if any part of the specified range is not mapped.
1346 */
1347 int
vm_map_clean(map, start, end, syncio, invalidate)
1349 vm_map_t map;
1350 vm_offset_t start;
1351 vm_offset_t end;
1352 boolean_t syncio;
1353 boolean_t invalidate;
1354 {
1355 register vm_map_entry_t current;
1356 vm_map_entry_t entry;
1357 vm_size_t size;
1358 vm_object_t object;
1359 vm_offset_t offset;
1360
1361 vm_map_lock_read(map);
1362 VM_MAP_RANGE_CHECK(map, start, end);
1363 if (!vm_map_lookup_entry(map, start, &entry)) {
1364 vm_map_unlock_read(map);
1365 return(KERN_INVALID_ADDRESS);
1366 }
1367
1368 /*
1369 * Make a first pass to check for holes.
1370 */
1371 for (current = entry; current->start < end; current = current->next) {
1372 if (current->is_sub_map) {
1373 vm_map_unlock_read(map);
1374 return(KERN_INVALID_ARGUMENT);
1375 }
1376 if (end > current->end &&
1377 (current->next == &map->header ||
1378 current->end != current->next->start)) {
1379 vm_map_unlock_read(map);
1380 return(KERN_INVALID_ADDRESS);
1381 }
1382 }
1383
1384 /*
1385 * Make a second pass, cleaning/uncaching pages from the indicated
1386 * objects as we go.
1387 */
1388 for (current = entry; current->start < end; current = current->next) {
1389 offset = current->offset + (start - current->start);
1390 size = (end <= current->end ? end : current->end) - start;
1391 if (current->is_a_map) {
1392 register vm_map_t smap;
1393 vm_map_entry_t tentry;
1394 vm_size_t tsize;
1395
1396 smap = current->object.share_map;
1397 vm_map_lock_read(smap);
1398 (void) vm_map_lookup_entry(smap, offset, &tentry);
1399 tsize = tentry->end - offset;
1400 if (tsize < size)
1401 size = tsize;
1402 object = tentry->object.vm_object;
1403 offset = tentry->offset + (offset - tentry->start);
1404 vm_object_lock(object);
1405 vm_map_unlock_read(smap);
1406 } else {
1407 object = current->object.vm_object;
1408 vm_object_lock(object);
1409 }
1410 /*
1411 * Flush pages if writing is allowed.
1412 * XXX should we continue on an error?
1413 */
1414 if ((current->protection & VM_PROT_WRITE) &&
1415 !vm_object_page_clean(object, offset, offset+size,
1416 syncio, FALSE)) {
1417 vm_object_unlock(object);
1418 vm_map_unlock_read(map);
1419 return(KERN_FAILURE);
1420 }
1421 if (invalidate)
1422 vm_object_page_remove(object, offset, offset+size);
1423 vm_object_unlock(object);
1424 start += size;
1425 }
1426
1427 vm_map_unlock_read(map);
1428 return(KERN_SUCCESS);
1429 }
1430
1431 /*
1432 * vm_map_entry_unwire: [ internal use only ]
1433 *
1434 * Make the region specified by this entry pageable.
1435 *
1436 * The map in question should be locked.
1437 * [This is the reason for this routine's existence.]
1438 */
1439 void
vm_map_entry_unwire(map, entry)
1441 vm_map_t map;
1442 register vm_map_entry_t entry;
1443 {
1444 vm_fault_unwire(map, entry->start, entry->end);
1445 entry->wired_count = 0;
1446 }
1447
1448 /*
1449 * vm_map_entry_delete: [ internal use only ]
1450 *
1451 * Deallocate the given entry from the target map.
1452 */
1453 void
vm_map_entry_delete(map, entry)
1455 register vm_map_t map;
1456 register vm_map_entry_t entry;
1457 {
1458 if (entry->wired_count != 0)
1459 vm_map_entry_unwire(map, entry);
1460
1461 vm_map_entry_unlink(map, entry);
1462 map->size -= entry->end - entry->start;
1463
1464 if (entry->is_a_map || entry->is_sub_map)
1465 vm_map_deallocate(entry->object.share_map);
1466 else
1467 vm_object_deallocate(entry->object.vm_object);
1468
1469 vm_map_entry_dispose(map, entry);
1470 }
1471
1472 /*
1473 * vm_map_delete: [ internal use only ]
1474 *
1475 * Deallocates the given address range from the target
1476 * map.
1477 *
1478 * When called with a sharing map, removes pages from
1479 * that region from all physical maps.
1480 */
1481 int
vm_map_delete(map, start, end)
1483 register vm_map_t map;
1484 vm_offset_t start;
1485 register vm_offset_t end;
1486 {
1487 register vm_map_entry_t entry;
1488 vm_map_entry_t first_entry;
1489
1490 /*
1491 * Find the start of the region, and clip it
1492 */
1493
1494 if (!vm_map_lookup_entry(map, start, &first_entry))
1495 entry = first_entry->next;
1496 else {
1497 entry = first_entry;
1498 vm_map_clip_start(map, entry, start);
1499
1500 /*
1501 * Fix the lookup hint now, rather than each
 * time through the loop.
1503 */
1504
1505 SAVE_HINT(map, entry->prev);
1506 }
1507
1508 /*
1509 * Save the free space hint
1510 */
1511
1512 if (map->first_free->start >= start)
1513 map->first_free = entry->prev;
1514
1515 /*
1516 * Step through all entries in this region
1517 */
1518
1519 while ((entry != &map->header) && (entry->start < end)) {
1520 vm_map_entry_t next;
1521 register vm_offset_t s, e;
1522 register vm_object_t object;
1523
1524 vm_map_clip_end(map, entry, end);
1525
1526 next = entry->next;
1527 s = entry->start;
1528 e = entry->end;
1529
1530 /*
1531 * Unwire before removing addresses from the pmap;
1532 * otherwise, unwiring will put the entries back in
1533 * the pmap.
1534 */
1535
1536 object = entry->object.vm_object;
1537 if (entry->wired_count != 0)
1538 vm_map_entry_unwire(map, entry);
1539
1540 /*
1541 * If this is a sharing map, we must remove
1542 * *all* references to this data, since we can't
1543 * find all of the physical maps which are sharing
1544 * it.
1545 */
1546
1547 if (object == kernel_object || object == kmem_object)
1548 vm_object_page_remove(object, entry->offset,
1549 entry->offset + (e - s));
1550 else if (!map->is_main_map)
1551 vm_object_pmap_remove(object,
1552 entry->offset,
1553 entry->offset + (e - s));
1554 else
1555 pmap_remove(map->pmap, s, e);
1556
1557 /*
1558 * Delete the entry (which may delete the object)
1559 * only after removing all pmap entries pointing
1560 * to its pages. (Otherwise, its page frames may
1561 * be reallocated, and any modify bits will be
1562 * set in the wrong object!)
1563 */
1564
1565 vm_map_entry_delete(map, entry);
1566 entry = next;
1567 }
1568 return(KERN_SUCCESS);
1569 }
1570
1571 /*
1572 * vm_map_remove:
1573 *
1574 * Remove the given address range from the target map.
1575 * This is the exported form of vm_map_delete.
1576 */
1577 int
vm_map_remove(map, start, end)
1579 register vm_map_t map;
1580 register vm_offset_t start;
1581 register vm_offset_t end;
1582 {
1583 register int result;
1584
1585 vm_map_lock(map);
1586 VM_MAP_RANGE_CHECK(map, start, end);
1587 result = vm_map_delete(map, start, end);
1588 vm_map_unlock(map);
1589
1590 return(result);
1591 }
1592
1593 /*
1594 * vm_map_check_protection:
1595 *
1596 * Assert that the target map allows the specified
1597 * privilege on the entire address region given.
1598 * The entire region must be allocated.
1599 */
1600 boolean_t
vm_map_check_protection(map, start, end, protection)
1602 register vm_map_t map;
1603 register vm_offset_t start;
1604 register vm_offset_t end;
1605 register vm_prot_t protection;
1606 {
1607 register vm_map_entry_t entry;
1608 vm_map_entry_t tmp_entry;
1609
1610 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
1611 return(FALSE);
1612 }
1613
1614 entry = tmp_entry;
1615
1616 while (start < end) {
1617 if (entry == &map->header) {
1618 return(FALSE);
1619 }
1620
1621 /*
1622 * No holes allowed!
1623 */
1624
1625 if (start < entry->start) {
1626 return(FALSE);
1627 }
1628
1629 /*
1630 * Check protection associated with entry.
1631 */
1632
1633 if ((entry->protection & protection) != protection) {
1634 return(FALSE);
1635 }
1636
1637 /* go to next entry */
1638
1639 start = entry->end;
1640 entry = entry->next;
1641 }
1642 return(TRUE);
1643 }
1644
1645 /*
1646 * vm_map_copy_entry:
1647 *
1648 * Copies the contents of the source entry to the destination
1649 * entry. The entries *must* be aligned properly.
1650 */
1651 void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
1653 vm_map_t src_map, dst_map;
1654 register vm_map_entry_t src_entry, dst_entry;
1655 {
1656 vm_object_t temp_object;
1657
1658 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1659 return;
1660
1661 if (dst_entry->object.vm_object != NULL &&
1662 (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
1663 printf("vm_map_copy_entry: copying over permanent data!\n");
1664
1665 /*
1666 * If our destination map was wired down,
1667 * unwire it now.
1668 */
1669
1670 if (dst_entry->wired_count != 0)
1671 vm_map_entry_unwire(dst_map, dst_entry);
1672
1673 /*
1674 * If we're dealing with a sharing map, we
1675 * must remove the destination pages from
1676 * all maps (since we cannot know which maps
1677 * this sharing map belongs in).
1678 */
1679
1680 if (dst_map->is_main_map)
1681 pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
1682 else
1683 vm_object_pmap_remove(dst_entry->object.vm_object,
1684 dst_entry->offset,
1685 dst_entry->offset +
1686 (dst_entry->end - dst_entry->start));
1687
1688 if (src_entry->wired_count == 0) {
1689
1690 boolean_t src_needs_copy;
1691
1692 /*
1693 * If the source entry is marked needs_copy,
1694 * it is already write-protected.
1695 */
1696 if (!src_entry->needs_copy) {
1697
1698 boolean_t su;
1699
1700 /*
1701 * If the source entry has only one mapping,
1702 * we can just protect the virtual address
1703 * range.
1704 */
1705 if (!(su = src_map->is_main_map)) {
1706 simple_lock(&src_map->ref_lock);
1707 su = (src_map->ref_count == 1);
1708 simple_unlock(&src_map->ref_lock);
1709 }
1710
1711 if (su) {
1712 pmap_protect(src_map->pmap,
1713 src_entry->start,
1714 src_entry->end,
1715 src_entry->protection & ~VM_PROT_WRITE);
1716 }
1717 else {
1718 vm_object_pmap_copy(src_entry->object.vm_object,
1719 src_entry->offset,
1720 src_entry->offset + (src_entry->end
1721 -src_entry->start));
1722 }
1723 }
1724
1725 /*
1726 * Make a copy of the object.
1727 */
1728 temp_object = dst_entry->object.vm_object;
1729 vm_object_copy(src_entry->object.vm_object,
1730 src_entry->offset,
1731 (vm_size_t)(src_entry->end -
1732 src_entry->start),
1733 &dst_entry->object.vm_object,
1734 &dst_entry->offset,
1735 &src_needs_copy);
1736 /*
1737 * If we didn't get a copy-object now, mark the
1738 * source map entry so that a shadow will be created
1739 * to hold its changed pages.
1740 */
1741 if (src_needs_copy)
1742 src_entry->needs_copy = TRUE;
1743
1744 /*
1745 * The destination always needs to have a shadow
1746 * created.
1747 */
1748 dst_entry->needs_copy = TRUE;
1749
1750 /*
1751 * Mark the entries copy-on-write, so that write-enabling
1752 * the entry won't make copy-on-write pages writable.
1753 */
1754 src_entry->copy_on_write = TRUE;
1755 dst_entry->copy_on_write = TRUE;
1756 /*
1757 * Get rid of the old object.
1758 */
1759 vm_object_deallocate(temp_object);
1760
1761 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1762 dst_entry->end - dst_entry->start, src_entry->start);
1763 }
1764 else {
1765 /*
1766 * Of course, wired down pages can't be set copy-on-write.
1767 * Cause wired pages to be copied into the new
1768 * map by simulating faults (the new pages are
1769 * pageable)
1770 */
1771 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1772 }
1773 }
1774
1775 /*
1776 * vm_map_copy:
1777 *
1778 * Perform a virtual memory copy from the source
1779 * address map/range to the destination map/range.
1780 *
1781 * If src_destroy or dst_alloc is requested,
1782 * the source and destination regions should be
1783 * disjoint, not only in the top-level map, but
1784 * in the sharing maps as well. [The best way
1785 * to guarantee this is to use a new intermediate
1786 * map to make copies. This also reduces map
1787 * fragmentation.]
1788 */
1789 int
vm_map_copy(dst_map, src_map,
1791 dst_addr, len, src_addr,
1792 dst_alloc, src_destroy)
1793 vm_map_t dst_map;
1794 vm_map_t src_map;
1795 vm_offset_t dst_addr;
1796 vm_size_t len;
1797 vm_offset_t src_addr;
1798 boolean_t dst_alloc;
1799 boolean_t src_destroy;
1800 {
1801 register
1802 vm_map_entry_t src_entry;
1803 register
1804 vm_map_entry_t dst_entry;
1805 vm_map_entry_t tmp_entry;
1806 vm_offset_t src_start;
1807 vm_offset_t src_end;
1808 vm_offset_t dst_start;
1809 vm_offset_t dst_end;
1810 vm_offset_t src_clip;
1811 vm_offset_t dst_clip;
1812 int result;
1813 boolean_t old_src_destroy;
1814
1815 /*
1816 * XXX While we figure out why src_destroy screws up,
1817 * we'll do it by explicitly vm_map_delete'ing at the end.
1818 */
1819
1820 old_src_destroy = src_destroy;
1821 src_destroy = FALSE;
1822
1823 /*
1824 * Compute start and end of region in both maps
1825 */
1826
1827 src_start = src_addr;
1828 src_end = src_start + len;
1829 dst_start = dst_addr;
1830 dst_end = dst_start + len;
1831
1832 /*
1833 * Check that the region can exist in both source
1834 * and destination.
1835 */
1836
1837 if ((dst_end < dst_start) || (src_end < src_start))
1838 return(KERN_NO_SPACE);
1839
1840 /*
1841 * Lock the maps in question -- we avoid deadlock
1842 * by ordering lock acquisition by map value
1843 */
1844
1845 if (src_map == dst_map) {
1846 vm_map_lock(src_map);
1847 }
1848 else if ((long) src_map < (long) dst_map) {
1849 vm_map_lock(src_map);
1850 vm_map_lock(dst_map);
1851 } else {
1852 vm_map_lock(dst_map);
1853 vm_map_lock(src_map);
1854 }
1855
1856 result = KERN_SUCCESS;
1857
1858 /*
1859 * Check protections... source must be completely readable and
1860 * destination must be completely writable. [Note that if we're
1861 * allocating the destination region, we don't have to worry
1862 * about protection, but instead about whether the region
1863 * exists.]
1864 */
1865
1866 if (src_map->is_main_map && dst_map->is_main_map) {
1867 if (!vm_map_check_protection(src_map, src_start, src_end,
1868 VM_PROT_READ)) {
1869 result = KERN_PROTECTION_FAILURE;
1870 goto Return;
1871 }
1872
1873 if (dst_alloc) {
1874 /* XXX Consider making this a vm_map_find instead */
1875 if ((result = vm_map_insert(dst_map, NULL,
1876 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1877 goto Return;
1878 }
1879 else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1880 VM_PROT_WRITE)) {
1881 result = KERN_PROTECTION_FAILURE;
1882 goto Return;
1883 }
1884 }
1885
1886 /*
1887 * Find the start entries and clip.
1888 *
1889 * Note that checking protection asserts that the
1890 * lookup cannot fail.
1891 *
1892 * Also note that we wait to do the second lookup
1893 * until we have done the first clip, as the clip
1894 * may affect which entry we get!
1895 */
1896
1897 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1898 src_entry = tmp_entry;
1899 vm_map_clip_start(src_map, src_entry, src_start);
1900
1901 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1902 dst_entry = tmp_entry;
1903 vm_map_clip_start(dst_map, dst_entry, dst_start);
1904
1905 /*
1906 * If both source and destination entries are the same,
1907 * retry the first lookup, as it may have changed.
1908 */
1909
1910 if (src_entry == dst_entry) {
1911 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1912 src_entry = tmp_entry;
1913 }
1914
1915 /*
1916 * If source and destination entries are still the same,
1917 * a null copy is being performed.
1918 */
1919
1920 if (src_entry == dst_entry)
1921 goto Return;
1922
1923 /*
1924 * Go through entries until we get to the end of the
1925 * region.
1926 */
1927
1928 while (src_start < src_end) {
1929 /*
1930 * Clip the entries to the endpoint of the entire region.
1931 */
1932
1933 vm_map_clip_end(src_map, src_entry, src_end);
1934 vm_map_clip_end(dst_map, dst_entry, dst_end);
1935
1936 /*
1937 * Clip each entry to the endpoint of the other entry.
1938 */
1939
1940 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
1941 vm_map_clip_end(src_map, src_entry, src_clip);
1942
1943 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
1944 vm_map_clip_end(dst_map, dst_entry, dst_clip);
1945
1946 /*
1947 * Both entries now match in size and relative endpoints.
1948 *
1949 * If both entries refer to a VM object, we can
1950 * deal with them now.
1951 */
1952
1953 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
1954 vm_map_copy_entry(src_map, dst_map, src_entry,
1955 dst_entry);
1956 }
1957 else {
1958 register vm_map_t new_dst_map;
1959 vm_offset_t new_dst_start;
1960 vm_size_t new_size;
1961 vm_map_t new_src_map;
1962 vm_offset_t new_src_start;
1963
1964 /*
1965 * We have to follow at least one sharing map.
1966 */
1967
1968 new_size = (dst_entry->end - dst_entry->start);
1969
1970 if (src_entry->is_a_map) {
1971 new_src_map = src_entry->object.share_map;
1972 new_src_start = src_entry->offset;
1973 }
1974 else {
1975 new_src_map = src_map;
1976 new_src_start = src_entry->start;
1977 vm_map_set_recursive(&src_map->lock);
1978 }
1979
1980 if (dst_entry->is_a_map) {
1981 vm_offset_t new_dst_end;
1982
1983 new_dst_map = dst_entry->object.share_map;
1984 new_dst_start = dst_entry->offset;
1985
1986 /*
1987 * Since the destination sharing entries
1988 * will be merely deallocated, we can
1989 * do that now, and replace the region
1990 * with a null object. [This prevents
1991 * splitting the source map to match
1992 * the form of the destination map.]
1993 * Note that we can only do so if the
1994 * source and destination do not overlap.
1995 */
1996
1997 new_dst_end = new_dst_start + new_size;
1998
1999 if (new_dst_map != new_src_map) {
2000 vm_map_lock(new_dst_map);
2001 (void) vm_map_delete(new_dst_map,
2002 new_dst_start,
2003 new_dst_end);
2004 (void) vm_map_insert(new_dst_map,
2005 NULL,
2006 (vm_offset_t) 0,
2007 new_dst_start,
2008 new_dst_end);
2009 vm_map_unlock(new_dst_map);
2010 }
2011 }
2012 else {
2013 new_dst_map = dst_map;
2014 new_dst_start = dst_entry->start;
2015 vm_map_set_recursive(&dst_map->lock);
2016 }
2017
2018 /*
2019 * Recursively copy the sharing map.
2020 */
2021
2022 (void) vm_map_copy(new_dst_map, new_src_map,
2023 new_dst_start, new_size, new_src_start,
2024 FALSE, FALSE);
2025
2026 if (dst_map == new_dst_map)
2027 vm_map_clear_recursive(&dst_map->lock);
2028 if (src_map == new_src_map)
2029 vm_map_clear_recursive(&src_map->lock);
2030 }
2031
2032 /*
2033 * Update variables for next pass through the loop.
2034 */
2035
2036 src_start = src_entry->end;
2037 src_entry = src_entry->next;
2038 dst_start = dst_entry->end;
2039 dst_entry = dst_entry->next;
2040
2041 /*
2042 * If the source is to be destroyed, here is the
2043 * place to do it.
2044 */
2045
2046 if (src_destroy && src_map->is_main_map &&
2047 dst_map->is_main_map)
2048 vm_map_entry_delete(src_map, src_entry->prev);
2049 }
2050
2051 /*
2052 * Update the physical maps as appropriate
2053 */
2054
2055 if (src_map->is_main_map && dst_map->is_main_map) {
2056 if (src_destroy)
2057 pmap_remove(src_map->pmap, src_addr, src_addr + len);
2058 }
2059
2060 /*
2061 * Unlock the maps
2062 */
2063
2064 Return: ;
2065
2066 if (old_src_destroy)
2067 vm_map_delete(src_map, src_addr, src_addr + len);
2068
2069 vm_map_unlock(src_map);
2070 if (src_map != dst_map)
2071 vm_map_unlock(dst_map);
2072
2073 return(result);
2074 }
2075
2076 /*
2077 * vmspace_fork:
2078 * Create a new process vmspace structure and vm_map
2079 * based on those of an existing process. The new map
2080 * is based on the old map, according to the inheritance
2081 * values on the regions in that map.
2082 *
2083 * The source map must not be locked.
2084 */
2085 struct vmspace *
vmspace_fork(vm1)
2087 register struct vmspace *vm1;
2088 {
2089 register struct vmspace *vm2;
2090 vm_map_t old_map = &vm1->vm_map;
2091 vm_map_t new_map;
2092 vm_map_entry_t old_entry;
2093 vm_map_entry_t new_entry;
2094 pmap_t new_pmap;
2095
2096 vm_map_lock(old_map);
2097
2098 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
2099 old_map->entries_pageable);
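	/*
	 * Copy the fields from vm_startcopy through the end of the
	 * structure from the parent; the earlier fields were set up
	 * by vmspace_alloc above.
	 */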
2100 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2101 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2102 new_pmap = &vm2->vm_pmap; /* XXX */
2103 new_map = &vm2->vm_map; /* XXX */
2104
2105 old_entry = old_map->header.next;
2106
2107 while (old_entry != &old_map->header) {
2108 if (old_entry->is_sub_map)
2109 panic("vm_map_fork: encountered a submap");
2110
2111 switch (old_entry->inheritance) {
2112 case VM_INHERIT_NONE:
2113 break;
2114
2115 case VM_INHERIT_SHARE:
2116 /*
2117 * If we don't already have a sharing map:
2118 */
2119
2120 if (!old_entry->is_a_map) {
2121 vm_map_t new_share_map;
2122 vm_map_entry_t new_share_entry;
2123
2124 /*
2125 * Create a new sharing map
2126 */
2127
2128 new_share_map = vm_map_create(NULL,
2129 old_entry->start,
2130 old_entry->end,
2131 TRUE);
2132 new_share_map->is_main_map = FALSE;
2133
2134 /*
2135 * Create the only sharing entry from the
2136 * old task map entry.
2137 */
2138
2139 new_share_entry =
2140 vm_map_entry_create(new_share_map);
2141 *new_share_entry = *old_entry;
2142 new_share_entry->wired_count = 0;
2143
2144 /*
2145 * Insert the entry into the new sharing
2146 * map
2147 */
2148
2149 vm_map_entry_link(new_share_map,
2150 new_share_map->header.prev,
2151 new_share_entry);
2152
2153 /*
2154 * Fix up the task map entry to refer
2155 * to the sharing map now.
2156 */
2157
2158 old_entry->is_a_map = TRUE;
2159 old_entry->object.share_map = new_share_map;
2160 old_entry->offset = old_entry->start;
2161 }
2162
2163 /*
2164 * Clone the entry, referencing the sharing map.
2165 */
2166
2167 new_entry = vm_map_entry_create(new_map);
2168 *new_entry = *old_entry;
2169 new_entry->wired_count = 0;
2170 vm_map_reference(new_entry->object.share_map);
2171
2172 /*
2173 * Insert the entry into the new map -- we
2174 * know we're inserting at the end of the new
2175 * map.
2176 */
2177
2178 vm_map_entry_link(new_map, new_map->header.prev,
2179 new_entry);
2180
2181 /*
2182 * Update the physical map
2183 */
2184
2185 pmap_copy(new_map->pmap, old_map->pmap,
2186 new_entry->start,
2187 (old_entry->end - old_entry->start),
2188 old_entry->start);
2189 break;
2190
2191 case VM_INHERIT_COPY:
2192 /*
2193 * Clone the entry and link into the map.
2194 */
2195
2196 new_entry = vm_map_entry_create(new_map);
2197 *new_entry = *old_entry;
2198 new_entry->wired_count = 0;
2199 new_entry->object.vm_object = NULL;
2200 new_entry->is_a_map = FALSE;
2201 vm_map_entry_link(new_map, new_map->header.prev,
2202 new_entry);
2203 if (old_entry->is_a_map) {
2204 int check;
2205
2206 check = vm_map_copy(new_map,
2207 old_entry->object.share_map,
2208 new_entry->start,
2209 (vm_size_t)(new_entry->end -
2210 new_entry->start),
2211 old_entry->offset,
2212 FALSE, FALSE);
2213 if (check != KERN_SUCCESS)
2214 printf("vm_map_fork: copy in share_map region failed\n");
2215 }
2216 else {
2217 vm_map_copy_entry(old_map, new_map, old_entry,
2218 new_entry);
2219 }
2220 break;
2221 }
2222 old_entry = old_entry->next;
2223 }
2224
2225 new_map->size = old_map->size;
2226 vm_map_unlock(old_map);
2227
2228 return(vm2);
2229 }
2230
2231 /*
2232 * vm_map_lookup:
2233 *
2234 * Finds the VM object, offset, and
2235 * protection for a given virtual address in the
2236 * specified map, assuming a page fault of the
2237 * type specified.
2238 *
2239 * Leaves the map in question locked for read; return
2240 * values are guaranteed until a vm_map_lookup_done
2241 * call is performed. Note that the map argument
2242 * is in/out; the returned map must be used in
2243 * the call to vm_map_lookup_done.
2244 *
2245 * A handle (out_entry) is returned for use in
2246 * vm_map_lookup_done, to make that fast.
2247 *
2248 * If a lookup is requested with "write protection"
2249 * specified, the map may be changed to perform virtual
2250 * copying operations, although the data referenced will
2251 * remain the same.
2252 */
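/*
 * Illustrative sketch (not part of the original sources): a page fault
 * handler typically brackets its work with vm_map_lookup() and
 * vm_map_lookup_done(), along these lines.  The variable names ("map",
 * "va", etc.) are assumptions for illustration only.
 *
 *	vm_map_t map;			faulting map (caller-supplied)
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_offset_t offset;
 *	vm_prot_t prot;
 *	boolean_t wired, single_use;
 *
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *	    &offset, &prot, &wired, &single_use) != KERN_SUCCESS)
 *		return;
 *	... resolve the fault against (object, offset) ...
 *	vm_map_lookup_done(map, entry);
 *
 * Note that the map argument is in/out and may change (e.g. when the
 * lookup descends into a submap); the possibly updated value is the
 * one that must be handed to vm_map_lookup_done().
 */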
2253 int
2254 vm_map_lookup(var_map, vaddr, fault_type, out_entry,
2255 object, offset, out_prot, wired, single_use)
2256 vm_map_t *var_map; /* IN/OUT */
2257 register vm_offset_t vaddr;
2258 register vm_prot_t fault_type;
2259
2260 vm_map_entry_t *out_entry; /* OUT */
2261 vm_object_t *object; /* OUT */
2262 vm_offset_t *offset; /* OUT */
2263 vm_prot_t *out_prot; /* OUT */
2264 boolean_t *wired; /* OUT */
2265 boolean_t *single_use; /* OUT */
2266 {
2267 vm_map_t share_map;
2268 vm_offset_t share_offset;
2269 register vm_map_entry_t entry;
2270 register vm_map_t map = *var_map;
2271 register vm_prot_t prot;
2272 register boolean_t su;
2273
2274 RetryLookup: ;
2275
2276 /*
2277 * Lookup the faulting address.
2278 */
2279
2280 vm_map_lock_read(map);
2281
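/*
 * Convenience macro: release the read lock taken above before
 * returning an error to the caller.
 */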
2282 #define RETURN(why) \
2283 { \
2284 vm_map_unlock_read(map); \
2285 return(why); \
2286 }
2287
2288 /*
2289 * If the map has an interesting hint, try it before calling
2290 * full blown lookup routine.
2291 */
2292
2293 simple_lock(&map->hint_lock);
2294 entry = map->hint;
2295 simple_unlock(&map->hint_lock);
2296
2297 *out_entry = entry;
2298
2299 if ((entry == &map->header) ||
2300 (vaddr < entry->start) || (vaddr >= entry->end)) {
2301 vm_map_entry_t tmp_entry;
2302
2303 /*
2304 * Entry was either not a valid hint, or the vaddr
2305 * was not contained in the entry, so do a full lookup.
2306 */
2307 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
2308 RETURN(KERN_INVALID_ADDRESS);
2309
2310 entry = tmp_entry;
2311 *out_entry = entry;
2312 }
2313
2314 /*
2315 * Handle submaps.
2316 */
2317
2318 if (entry->is_sub_map) {
2319 vm_map_t old_map = map;
2320
2321 *var_map = map = entry->object.sub_map;
2322 vm_map_unlock_read(old_map);
2323 goto RetryLookup;
2324 }
2325
2326 /*
2327 * Check whether this task is allowed to have
2328 * this page.
2329 */
2330
2331 prot = entry->protection;
2332 if ((fault_type & (prot)) != fault_type)
2333 RETURN(KERN_PROTECTION_FAILURE);
2334
2335 /*
2336 * If this page is not pageable, we have to get
2337 * it for all possible accesses.
2338 */
2339
2340 if (*wired = (entry->wired_count != 0))
2341 prot = fault_type = entry->protection;
2342
2343 /*
2344 * If we don't already have a VM object, track
2345 * it down.
2346 */
2347
2348 if (su = !entry->is_a_map) {
2349 share_map = map;
2350 share_offset = vaddr;
2351 }
2352 else {
2353 vm_map_entry_t share_entry;
2354
2355 /*
2356 * Compute the sharing map, and offset into it.
2357 */
2358
2359 share_map = entry->object.share_map;
2360 share_offset = (vaddr - entry->start) + entry->offset;
2361
2362 /*
2363 * Look for the backing store object and offset
2364 */
2365
2366 vm_map_lock_read(share_map);
2367
2368 if (!vm_map_lookup_entry(share_map, share_offset,
2369 &share_entry)) {
2370 vm_map_unlock_read(share_map);
2371 RETURN(KERN_INVALID_ADDRESS);
2372 }
2373 entry = share_entry;
2374 }
2375
2376 /*
2377 	 * If the entry was copy-on-write, we either shadow it or demote the access:
2378 */
2379
2380 if (entry->needs_copy) {
2381 /*
2382 * If we want to write the page, we may as well
2383 * handle that now since we've got the sharing
2384 * map locked.
2385 *
2386 * If we don't need to write the page, we just
2387 * demote the permissions allowed.
2388 */
2389
2390 if (fault_type & VM_PROT_WRITE) {
2391 /*
2392 * Make a new object, and place it in the
2393 * object chain. Note that no new references
2394 * have appeared -- one just moved from the
2395 * share map to the new object.
2396 */
2397
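			/*
			 * Upgrade the sharing map lock from shared to
			 * exclusive.  If the upgrade fails, the read lock
			 * is gone and the map may have changed underneath
			 * us, so start the lookup over.
			 */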
2398 if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
2399 (void *)0, curproc)) {
2400 if (share_map != map)
2401 vm_map_unlock_read(map);
2402 goto RetryLookup;
2403 }
2404
2405 vm_object_shadow(
2406 &entry->object.vm_object,
2407 &entry->offset,
2408 (vm_size_t) (entry->end - entry->start));
2409
2410 entry->needs_copy = FALSE;
2411
2412 lockmgr(&share_map->lock, LK_DOWNGRADE,
2413 (void *)0, curproc);
2414 }
2415 else {
2416 /*
2417 * We're attempting to read a copy-on-write
2418 * page -- don't allow writes.
2419 */
2420
2421 prot &= (~VM_PROT_WRITE);
2422 }
2423 }
2424
2425 /*
2426 * Create an object if necessary.
2427 */
2428 if (entry->object.vm_object == NULL) {
2429
2430 if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
2431 (void *)0, curproc)) {
2432 if (share_map != map)
2433 vm_map_unlock_read(map);
2434 goto RetryLookup;
2435 }
2436
2437 entry->object.vm_object = vm_object_allocate(
2438 (vm_size_t)(entry->end - entry->start));
2439 entry->offset = 0;
2440 lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, curproc);
2441 }
2442
2443 /*
2444 * Return the object/offset from this entry. If the entry
2445 * was copy-on-write or empty, it has been fixed up.
2446 */
2447
2448 *offset = (share_offset - entry->start) + entry->offset;
2449 *object = entry->object.vm_object;
2450
2451 /*
2452 * Return whether this is the only map sharing this data.
2453 */
2454
2455 if (!su) {
2456 simple_lock(&share_map->ref_lock);
2457 su = (share_map->ref_count == 1);
2458 simple_unlock(&share_map->ref_lock);
2459 }
2460
2461 *out_prot = prot;
2462 *single_use = su;
2463
2464 return(KERN_SUCCESS);
2465
2466 #undef RETURN
2467 }
2468
2469 /*
2470 * vm_map_lookup_done:
2471 *
2472 * Releases locks acquired by a vm_map_lookup
2473 * (according to the handle returned by that lookup).
2474 */
2475
2476 void
2477 vm_map_lookup_done(map, entry)
2478 register vm_map_t map;
2479 vm_map_entry_t entry;
2480 {
2481 /*
2482 * If this entry references a map, unlock it first.
2483 */
2484
2485 if (entry->is_a_map)
2486 vm_map_unlock_read(entry->object.share_map);
2487
2488 /*
2489 * Unlock the main-level map
2490 */
2491
2492 vm_map_unlock_read(map);
2493 }
2494
2495 /*
2496 * Routine: vm_map_simplify
2497 * Purpose:
2498 * Attempt to simplify the map representation in
2499 * the vicinity of the given starting address.
2500 * Note:
2501 * This routine is intended primarily to keep the
2502 * kernel maps more compact -- they generally don't
2503 * benefit from the "expand a map entry" technology
2504 * at allocation time because the adjacent entry
2505 * is often wired down.
2506 */
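/*
 * Illustrative sketch (not part of the original sources): two entries
 * are coalesced only when the second is a seamless continuation of the
 * first, e.g.
 *
 *	prev:	[0x1000, 0x3000)  object A, offset 0x0000
 *	this:	[0x3000, 0x4000)  object A, offset 0x2000
 *
 * with matching protection, inheritance, wiring and copy-on-write
 * state; the pair then collapses into a single entry covering
 * [0x1000, 0x4000) at offset 0x0000.
 */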
2507 void
2508 vm_map_simplify(map, start)
2509 vm_map_t map;
2510 vm_offset_t start;
2511 {
2512 vm_map_entry_t this_entry;
2513 vm_map_entry_t prev_entry;
2514
2515 vm_map_lock(map);
2516 if (
2517 (vm_map_lookup_entry(map, start, &this_entry)) &&
2518 ((prev_entry = this_entry->prev) != &map->header) &&
2519
2520 (prev_entry->end == start) &&
2521 (map->is_main_map) &&
2522
2523 (prev_entry->is_a_map == FALSE) &&
2524 (prev_entry->is_sub_map == FALSE) &&
2525
2526 (this_entry->is_a_map == FALSE) &&
2527 (this_entry->is_sub_map == FALSE) &&
2528
2529 (prev_entry->inheritance == this_entry->inheritance) &&
2530 (prev_entry->protection == this_entry->protection) &&
2531 (prev_entry->max_protection == this_entry->max_protection) &&
2532 (prev_entry->wired_count == this_entry->wired_count) &&
2533
2534 (prev_entry->copy_on_write == this_entry->copy_on_write) &&
2535 (prev_entry->needs_copy == this_entry->needs_copy) &&
2536
2537 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
2538 ((prev_entry->offset + (prev_entry->end - prev_entry->start))
2539 == this_entry->offset)
2540 ) {
2541 if (map->first_free == this_entry)
2542 map->first_free = prev_entry;
2543
2544 SAVE_HINT(map, prev_entry);
2545 vm_map_entry_unlink(map, this_entry);
2546 prev_entry->end = this_entry->end;
2547 vm_object_deallocate(this_entry->object.vm_object);
2548 vm_map_entry_dispose(map, this_entry);
2549 }
2550 vm_map_unlock(map);
2551 }
2552
2553 /*
2554 * vm_map_print: [ debug ]
2555 */
2556 void
2557 vm_map_print(map, full)
2558 register vm_map_t map;
2559 boolean_t full;
2560 {
2561 register vm_map_entry_t entry;
2562 extern int indent;
2563
2564 iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
2565 (map->is_main_map ? "Task" : "Share"),
2566 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2567 map->timestamp);
2568
2569 if (!full && indent)
2570 return;
2571
2572 indent += 2;
2573 for (entry = map->header.next; entry != &map->header;
2574 entry = entry->next) {
2575 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
2576 (int) entry, (int) entry->start, (int) entry->end);
2577 if (map->is_main_map) {
2578 static char *inheritance_name[4] =
2579 { "share", "copy", "none", "donate_copy"};
2580 printf("prot=%x/%x/%s, ",
2581 entry->protection,
2582 entry->max_protection,
2583 inheritance_name[entry->inheritance]);
2584 if (entry->wired_count != 0)
2585 printf("wired, ");
2586 }
2587
2588 if (entry->is_a_map || entry->is_sub_map) {
2589 printf("share=0x%x, offset=0x%x\n",
2590 (int) entry->object.share_map,
2591 (int) entry->offset);
2592 if ((entry->prev == &map->header) ||
2593 (!entry->prev->is_a_map) ||
2594 (entry->prev->object.share_map !=
2595 entry->object.share_map)) {
2596 indent += 2;
2597 vm_map_print(entry->object.share_map, full);
2598 indent -= 2;
2599 }
2600
2601 }
2602 else {
2603 printf("object=0x%x, offset=0x%x",
2604 (int) entry->object.vm_object,
2605 (int) entry->offset);
2606 if (entry->copy_on_write)
2607 printf(", copy (%s)",
2608 entry->needs_copy ? "needed" : "done");
2609 printf("\n");
2610
2611 if ((entry->prev == &map->header) ||
2612 (entry->prev->is_a_map) ||
2613 (entry->prev->object.vm_object !=
2614 entry->object.vm_object)) {
2615 indent += 2;
2616 vm_object_print(entry->object.vm_object, full);
2617 indent -= 2;
2618 }
2619 }
2620 }
2621 indent -= 2;
2622 }
2623