xref: /minix/minix/servers/vm/region.c (revision 83133719)
1 
2 #include <minix/com.h>
3 #include <minix/callnr.h>
4 #include <minix/type.h>
5 #include <minix/config.h>
6 #include <minix/const.h>
7 #include <minix/sysutil.h>
8 #include <minix/syslib.h>
9 #include <minix/debug.h>
10 #include <minix/bitmap.h>
11 #include <minix/hash.h>
12 #include <machine/multiboot.h>
13 
14 #include <sys/mman.h>
15 
16 #include <limits.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <assert.h>
20 #include <stdint.h>
21 #include <sys/param.h>
22 
23 #include "vm.h"
24 #include "proto.h"
25 #include "util.h"
26 #include "glo.h"
27 #include "region.h"
28 #include "sanitycheck.h"
29 #include "memlist.h"
30 #include "memtype.h"
31 #include "regionavl.h"
32 
33 static struct vir_region *map_copy_region(struct vmproc *vmp, struct
34 	vir_region *vr);
35 
/* One-time initialization hook for the region module; currently a no-op. */
void map_region_init(void)
{
}
39 
/* Dump one region and each of its present physical pages to the console.
 * Debug aid used by map_printmap() and sanity-check failure paths.
 */
static void map_printregion(struct vir_region *vr)
{
	unsigned int i;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", vr->def_memtype->name);
	/* NOTE(review): %p below receives def_memtype->name (a string), so
	 * the name is printed as a pointer value — confirm this is intended.
	 */
	printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
		vr->vaddr, vr->length, vr->length/1024,
		vr->def_memtype->name,
		(vr->flags & VR_WRITABLE) ? "writable" : "readonly");
	printf("\t\tphysblocks:\n");
	/* One physblocks slot per page; NULL slots are unmapped pages. */
	for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
		if(!(ph=vr->physblocks[i])) continue;
		printf("\t\t@ %lx (refs %d): phys 0x%lx, %s\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys,
		pt_writable(vr->parent, vr->vaddr + ph->offset) ? "W" : "R");

	}
}
59 
60 struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
61 {
62 	int i;
63 	struct phys_region *foundregion;
64 	assert(!(offset % VM_PAGE_SIZE));
65 	assert( /* offset >= 0 && */ offset < region->length);
66 	i = offset/VM_PAGE_SIZE;
67 	if((foundregion =  region->physblocks[i]))
68 		assert(foundregion->offset == offset);
69 	return foundregion;
70 }
71 
72 void physblock_set(struct vir_region *region, vir_bytes offset,
73 	struct phys_region *newphysr)
74 {
75 	int i;
76 	struct vmproc *proc;
77 	assert(!(offset % VM_PAGE_SIZE));
78 	assert( /* offset >= 0 && */ offset < region->length);
79 	i = offset/VM_PAGE_SIZE;
80 	proc = region->parent;
81 	assert(proc);
82 	if(newphysr) {
83 		assert(!region->physblocks[i]);
84 		assert(newphysr->offset == offset);
85 		proc->vm_total += VM_PAGE_SIZE;
86 		if (proc->vm_total > proc->vm_total_max)
87 			proc->vm_total_max = proc->vm_total;
88 	} else {
89 		assert(region->physblocks[i]);
90 		proc->vm_total -= VM_PAGE_SIZE;
91 	}
92 	region->physblocks[i] = newphysr;
93 }
94 
95 /*===========================================================================*
96  *				map_printmap				     *
97  *===========================================================================*/
98 void map_printmap(struct vmproc *vmp)
99 {
100 	struct vir_region *vr;
101 	region_iter iter;
102 
103 	printf("memory regions in process %d:\n", vmp->vm_endpoint);
104 
105 	region_start_iter_least(&vmp->vm_regions_avl, &iter);
106 	while((vr = region_get_iter(&iter))) {
107 		map_printregion(vr);
108 		region_incr_iter(&iter);
109 	}
110 }
111 
/* Return the region immediately following 'vr' in its parent's address
 * space, or NULL if 'vr' is the last region. Asserts that regions are
 * properly ordered and non-overlapping.
 */
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	/* Position the iterator exactly on vr; it must be in the tree. */
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	/* Regions must be strictly ordered and must not overlap. */
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}
129 
130 static int pr_writable(struct vir_region *vr, struct phys_region *pr)
131 {
132 	assert(pr->memtype->writable);
133 	return ((vr->flags & VR_WRITABLE) && pr->memtype->writable(pr));
134 }
135 
136 #if SANITYCHECKS
137 
/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
/* Verify that the page table entry for phys_region 'pr' of region 'vr'
 * matches what VM believes it should be (address, presence, and
 * writability), using pt_writemap() in WMF_VERIFY mode. Returns OK or
 * the pt_writemap() error, dumping the region on mismatch.
 */
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	/* WMF_VERIFY: compare against the existing mapping, don't write. */
	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
	  pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vr);
	}

	return r;
}
164 
/*===========================================================================*
 *				map_sanitycheck			     *
 *===========================================================================*/
/* Full consistency check over all regions and physical blocks of all
 * in-use processes: slab sanity, parent back-pointers, refcount vs.
 * observed reference count (seencount), region ordering, alignment, and
 * page-table agreement. 'file'/'line' identify the call site for
 * memtype-specific checks.
 */
void map_sanitycheck(const char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		vir_bytes voffset;				\
		region_iter v_iter;				\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {	\
			struct phys_region *pr;			\
			regioncode;				\
			for(voffset = 0; voffset < vr->length; \
				voffset += VM_PAGE_SIZE) {	\
				if(!(pr = physblock_get(vr, voffset))) 	\
					continue;	\
				physcode;			\
			}					\
			region_incr_iter(&v_iter);		\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	/* Pass 1: reset seencount; pass 2: offsets match slot positions;
	 * pass 3: count references and run memtype checks once per block.
	 */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->memtype->ev_sanitycheck)
				pr->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	/* Regions must be ordered and page-aligned; each block's refcount
	 * must equal the number of phys_regions referencing it (plus one
	 * if the block is held by the cache, PBF_INCACHE).
	 */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->flags & PBF_INCACHE) pr->ph->seencount++;
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx  refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			if(pr->ph->flags & PBF_INCACHE) n_others++;
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	/* Finally verify the actual page tables agree with VM's state. */
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
251 
252 #endif
253 
/*=========================================================================*
 *				map_ph_writept				*
 *=========================================================================*/
/* Enter the page-table mapping for one phys_region 'pr' of region 'vr'
 * into vmp's page table, with permission bits derived from the region
 * flags and the block's memory type. Returns OK or ENOMEM.
 */
int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int flags = PTF_PRESENT | PTF_USER;
	struct phys_block *pb = pr->ph;

	assert(vr);
	assert(pr);
	assert(pb);

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		flags |= PTF_WRITE;
	else
		flags |= PTF_READ;


	/* Memory types may contribute extra architecture flags. */
	if(vr->def_memtype->pt_flags)
		flags |= vr->def_memtype->pt_flags(vr);

	/* With SANITYCHECKS, a page not yet written via this path may not
	 * be overwritten silently (flag 0 instead of WMF_OVERWRITE).
	 */
	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, flags,
#if SANITYCHECKS
	  	!pr->written ? 0 :
#endif
	  	WMF_OVERWRITE) != OK) {
	    printf("VM: map_writept: pt_writemap failed\n");
	    return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}
296 
297 #define SLOT_FAIL ((vir_bytes) -1)
298 
/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
/* Find a free virtual address range of 'length' bytes within [minv, maxv)
 * in vmp's address space, preferring the highest possible placement.
 * A maxv of 0 means "map exactly at minv". Returns the start address of
 * the chosen range, or SLOT_FAIL if none fits.
 */
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
        if(maxv == 0) {
                maxv = minv + length;

                /* Sanity check. */
                if(maxv <= minv) {
                        printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
                                minv, length);
                        return SLOT_FAIL;
                }
        }

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}

	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

/* Clip [rangestart, rangeend) to [minv, maxv); if 'length' bytes fit,
 * record the highest-placed fit in startv and set foundflag.
 */
#define FREEVRANGE_TRY(rangestart, rangeend) {		\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend   = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

/* First try with one guard page shaved off both ends; fall back to the
 * exact range if that does not fit.
 */
#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	}								\
}

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		/* Walk regions downwards, probing the gap below each one. */
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
			  vr->vaddr);
		}
	}

	if(!foundflag) {
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
395 
396 /*===========================================================================*
397  *				region_find_slot			     *
398  *===========================================================================*/
399 static vir_bytes region_find_slot(struct vmproc *vmp,
400 		vir_bytes minv, vir_bytes maxv, vir_bytes length)
401 {
402 	vir_bytes v, hint = vmp->vm_region_top;
403 
404 	/* use the top of the last inserted region as a minv hint if
405 	 * possible. remember that a zero maxv is a special case.
406 	 */
407 
408 	if(maxv && hint < maxv && hint >= minv) {
409 		v = region_find_slot_range(vmp, minv, hint, length);
410 
411 		if(v != SLOT_FAIL)
412 			return v;
413 	}
414 
415 	return region_find_slot_range(vmp, minv, maxv, length);
416 }
417 
418 static unsigned int phys_slot(vir_bytes len)
419 {
420 	assert(!(len % VM_PAGE_SIZE));
421 	return len / VM_PAGE_SIZE;
422 }
423 
/* Allocate and initialize a new vir_region covering [startv, startv+length)
 * for process 'vmp', with an empty physblocks array (one slot per page).
 * Returns NULL on allocation failure. The region is not yet linked into
 * the process's region tree.
 */
static struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	struct vir_region *newregion;
	struct phys_region **newphysregions;
	static u32_t id;	/* monotonically increasing region id */
	int slots = phys_slot(length);

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	memset(newregion, 0, sizeof(*newregion));
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->def_memtype = memtype;
	newregion->remaps = 0;
	newregion->id = id++;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	/* calloc: all page slots start out unmapped (NULL). */
	if(!(newphysregions = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(newregion);
		return NULL;
	}

	USE(newregion, newregion->physblocks = newphysregions;);

	return newregion;
}
459 
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
/* Create a new region of 'length' bytes for 'vmp' somewhere in [minv, maxv)
 * (maxv 0 meaning exactly at minv), run the memory type's creation hook,
 * optionally pre-allocate its memory (MF_PREALLOC), and link it into the
 * process's region tree. Returns the new region or NULL on failure.
 */
struct vir_region *map_page_region(struct vmproc *vmp, vir_bytes minv,
	vir_bytes maxv, vir_bytes length, u32_t flags, int mapflags,
	mem_type_t *memtype)
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If a new event is specified, invoke it. */
	if(newregion->def_memtype->ev_new) {
		if(newregion->def_memtype->ev_new(newregion) != OK) {
			/* ev_new will have freed and removed the region */
			return NULL;
		}
	}

	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1,
			NULL, 0, 0) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			/* Roll back: free the physblocks array and the
			 * region itself; it was never linked in.
			 */
			free(newregion->physblocks);
			USE(newregion,
				newregion->physblocks = NULL;);
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
526 
527 /*===========================================================================*
528  *				map_subfree				     *
529  *===========================================================================*/
530 static int map_subfree(struct vir_region *region,
531 	vir_bytes start, vir_bytes len)
532 {
533 	struct phys_region *pr;
534 	vir_bytes end = start+len;
535 	vir_bytes voffset;
536 
537 #if SANITYCHECKS
538 	SLABSANE(region);
539 	for(voffset = 0; voffset < phys_slot(region->length);
540 		voffset += VM_PAGE_SIZE) {
541 		struct phys_region *others;
542 		struct phys_block *pb;
543 
544 		if(!(pr = physblock_get(region, voffset)))
545 			continue;
546 
547 		pb = pr->ph;
548 
549 		for(others = pb->firstregion; others;
550 			others = others->next_ph_list) {
551 			assert(others->ph == pb);
552 		}
553 	}
554 #endif
555 
556 	for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
557 		if(!(pr = physblock_get(region, voffset)))
558 			continue;
559 		assert(pr->offset >= start);
560 		assert(pr->offset < end);
561 		pb_unreferenced(region, pr, 1);
562 		SLABFREE(pr);
563 	}
564 
565 	return OK;
566 }
567 
568 /*===========================================================================*
569  *				map_free				     *
570  *===========================================================================*/
571 int map_free(struct vir_region *region)
572 {
573 	int r;
574 
575 	if((r=map_subfree(region, 0, region->length)) != OK) {
576 		printf("%d\n", __LINE__);
577 		return r;
578 	}
579 
580 	if(region->def_memtype->ev_delete)
581 		region->def_memtype->ev_delete(region);
582 	free(region->physblocks);
583 	region->physblocks = NULL;
584 	SLABFREE(region);
585 
586 	return OK;
587 }
588 
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
/* Free every region of process 'vmp' and reset its region tree to empty.
 * Always returns OK.
 */
int map_free_proc(struct vmproc *vmp)
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		/* Unlink before freeing so intermediate sanity checks never
		 * see a half-freed region in the tree.
		 */
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
615 
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
/* Find the region of 'vmp' containing virtual address 'offset'. If found
 * and 'physr' is non-NULL, also store the phys_region at that page (or
 * NULL if the page is unmapped). Returns the region, or NULL if no region
 * contains the address.
 */
struct vir_region *map_lookup(struct vmproc *vmp,
	vir_bytes offset, struct phys_region **physr)
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	/* The candidate is the region starting at or below 'offset'; it
	 * matches only if 'offset' also falls before its end.
	 */
	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physblock_get(r, ph);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
647 
648 u32_t vrallocflags(u32_t flags)
649 {
650 	u32_t allocflags = 0;
651 
652 	if(flags & VR_PHYS64K)
653 		allocflags |= PAF_ALIGN64K;
654 	if(flags & VR_LOWER16MB)
655 		allocflags |= PAF_LOWER16MB;
656 	if(flags & VR_LOWER1MB)
657 		allocflags |= PAF_LOWER1MB;
658 	if(!(flags & VR_UNINITIALIZED))
659 		allocflags |= PAF_CLEAR;
660 
661 	return allocflags;
662 }
663 
/*===========================================================================*
 *				map_pf			     *
 *===========================================================================*/
/* Handle a page fault at byte 'offset' within 'region' of process 'vmp'.
 * Creates a phys_region/phys_block pair if the page had none, lets the
 * memory type resolve the fault (possibly asynchronously: SUSPEND, with
 * pf_callback/state/len used to continue later), and finally enters the
 * mapping into the page table. 'write' indicates a write fault; '*io' is
 * set by memtypes that performed I/O. Returns OK, SUSPEND, or an error.
 */
int map_pf(struct vmproc *vmp,
	struct vir_region *region,
	vir_bytes offset,
	int write,
	vfs_callback_t pf_callback,
	void *state,
	int len,
	int *io)
{
	struct phys_region *ph;
	int r = OK;

	/* Fault addresses are rounded down to their page. */
	offset -= offset % VM_PAGE_SIZE;

/*	assert(offset >= 0); */ /* always true */
	assert(offset < region->length);

	assert(!(region->vaddr % VM_PAGE_SIZE));
	/* A write fault on a read-only region must not reach this point. */
	assert(!(write && !(region->flags & VR_WRITABLE)));

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physblock_get(region, offset))) {
		struct phys_block *pb;

		/* New block. */

		/* MAP_NONE: no physical memory yet; the memtype's fault
		 * handler below is responsible for providing it.
		 */
		if(!(pb = pb_new(MAP_NONE))) {
			printf("map_pf: pb_new failed\n");
			return ENOMEM;
		}

		if(!(ph = pb_reference(pb, offset, region,
			region->def_memtype))) {
			printf("map_pf: pb_reference failed\n");
			pb_free(pb);
			return ENOMEM;
		}
	}

	assert(ph);
	assert(ph->ph);

	/* If we're writing and the block is already
	 * writable, nothing to do.
	 */

	assert(ph->memtype->writable);

	if(!write || !ph->memtype->writable(ph)) {
		assert(ph->memtype->ev_pagefault);
		assert(ph->ph);

		if((r = ph->memtype->ev_pagefault(vmp,
			region, ph, write, pf_callback, state, len, io)) == SUSPEND) {
			return SUSPEND;
		}

		if(r != OK) {
#if 0
			printf("map_pf: pagefault in %s failed\n", ph->memtype->name);
#endif
			/* Drop the reference we may have just created. */
			if(ph)
				pb_unreferenced(region, ph, 1);
			return r;
		}

		assert(ph);
		assert(ph->ph);
		assert(ph->ph->phys != MAP_NONE);
	}

	assert(ph->ph);
	assert(ph->ph->phys != MAP_NONE);

	/* Enter the now-resolved page into the page table. */
	if((r = map_ph_writept(vmp, region, ph)) != OK) {
		printf("map_pf: writept failed\n");
		return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}
758 
759 int map_handle_memory(struct vmproc *vmp,
760 	struct vir_region *region, vir_bytes start_offset, vir_bytes length,
761 	int write, vfs_callback_t cb, void *state, int statelen)
762 {
763 	vir_bytes offset, lim;
764 	int r;
765 	int io = 0;
766 
767 	assert(length > 0);
768 	lim = start_offset + length;
769 	assert(lim > start_offset);
770 
771 	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
772 		if((r = map_pf(vmp, region, offset, write,
773 		   cb, state, statelen, &io)) != OK)
774 			return r;
775 
776 	return OK;
777 }
778 
779 /*===========================================================================*
780  *				map_pin_memory      			     *
781  *===========================================================================*/
782 int map_pin_memory(struct vmproc *vmp)
783 {
784 	struct vir_region *vr;
785 	int r;
786 	region_iter iter;
787 	region_start_iter_least(&vmp->vm_regions_avl, &iter);
788 	/* Scan all memory regions. */
789 	while((vr = region_get_iter(&iter))) {
790 		/* Make sure region is mapped to physical memory and writable.*/
791 		r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
792 		if(r != OK) {
793 		    panic("map_pin_memory: map_handle_memory failed: %d", r);
794 		}
795 		region_incr_iter(&iter);
796 	}
797 	return OK;
798 }
799 
/*===========================================================================*
 *				map_copy_region			     	*
 *===========================================================================*/
struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	int r;
#if SANITYCHECKS
	unsigned int cr;
	cr = physregions(vr);	/* reference count to compare against below */
#endif
	vir_bytes p;

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->def_memtype)))
		return NULL;

	/* The copy belongs to the destination process, not vr's parent. */
	USE(newvr, newvr->parent = vmp;);

	if(vr->def_memtype->ev_copy && (r=vr->def_memtype->ev_copy(vr, newvr)) != OK) {
		map_free(newvr);
		printf("VM: memtype-specific copy failed (%d)\n", r);
		return NULL;
	}

	/* Reference every present physical page from the new region too. */
	for(p = 0; p < phys_slot(vr->length); p++) {
		struct phys_region *newph;

		if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
		newph = pb_reference(ph->ph, ph->offset, newvr,
			vr->def_memtype);

		if(!newph) { map_free(newvr); return NULL; }

		if(ph->memtype->ev_reference)
			ph->memtype->ev_reference(ph, newph);

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(physregions(vr) == cr);
#endif
	}

#if SANITYCHECKS
	assert(physregions(vr) == physregions(newvr));
#endif

	return newvr;
}
857 
/*===========================================================================*
 *				copy_abs2region			     	*
 *===========================================================================*/
/* Copy 'len' bytes from absolute physical address 'absaddr' into
 * 'destregion' starting at byte 'offset', page piece by page piece.
 * Every target page must already be mapped and have refcount 1 (no
 * sharing, so the write cannot leak into another process). Returns OK
 * or EFAULT.
 */
int copy_abs2region(phys_bytes absaddr, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)

{
	assert(destregion);
	assert(destregion->physblocks);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->physblocks);
		if(!(ph = physblock_get(destregion, offset))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		/* Clip this piece to the remainder of the current page. */
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(absaddr, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		absaddr += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
903 
904 /*=========================================================================*
905  *				map_writept				*
906  *=========================================================================*/
907 int map_writept(struct vmproc *vmp)
908 {
909 	struct vir_region *vr;
910 	struct phys_region *ph;
911 	int r;
912 	region_iter v_iter;
913 	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
914 
915 	while((vr = region_get_iter(&v_iter))) {
916 		vir_bytes p;
917 		for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
918 			if(!(ph = physblock_get(vr, p))) continue;
919 
920 			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
921 				printf("VM: map_writept: failed\n");
922 				return r;
923 			}
924 		}
925 		region_incr_iter(&v_iter);
926 	}
927 
928 	return OK;
929 }
930 
/*========================================================================*
 *			       map_proc_copy			     	  *
 *========================================================================*/
/* Replace dst's region tree with a copy of all of src's regions.
 * Returns OK or ENOMEM.
 */
int map_proc_copy(struct vmproc *dst, struct vmproc *src)
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	/* NULL: start copying from src's lowest-addressed region. */
	return map_proc_copy_from(dst, src, NULL);
}
941 
/*========================================================================*
 *			     map_proc_copy_from			     	  *
 *========================================================================*/
/* Copy src's regions, starting at 'start_src_vr' (or its least region if
 * NULL), into dst's region tree, then rewrite both processes' page
 * tables (copied pages are shared, so mappings may need updating, e.g.
 * for copy-on-write — TODO confirm against memtype implementations).
 * On failure all of dst's regions are freed; returns OK or ENOMEM.
 */
int map_proc_copy_from(struct vmproc *dst, struct vmproc *src,
	struct vir_region *start_src_vr)
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		struct vir_region *newvr;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		region_insert(&dst->vm_regions_avl, newvr);
		assert(vr->length == newvr->length);

#if SANITYCHECKS
	{
		/* The copy must have distinct phys_regions referencing the
		 * same underlying phys_blocks, page for page.
		 */
		vir_bytes vaddr;
		struct phys_region *orig_ph, *new_ph;
		assert(vr->physblocks != newvr->physblocks);
		for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
			orig_ph = physblock_get(vr, vaddr);
			new_ph = physblock_get(newvr, vaddr);
			if(!orig_ph) { assert(!new_ph); continue;}
			assert(new_ph);
			assert(orig_ph != new_ph);
			assert(orig_ph->ph == new_ph->ph);
		}
	}
#endif
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
997 
/* Grow the region below virtual address 'v' so that it covers up to 'v'
 * (rounded up to a page). If the memory type cannot resize in place, an
 * anonymous writable region is mapped into the gap instead. Returns OK
 * or ENOMEM.
 */
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, limit, extralen;
	struct vir_region *vr, *nextvr;
	struct phys_region **newpr;
	int newslots, prevslots, addedslots, r;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	/* Already large enough: nothing to do. */
	if(vr->vaddr + vr->length >= v) return OK;

	limit = vr->vaddr + vr->length;

	assert(vr->vaddr <= offset);
	newslots = phys_slot(offset - vr->vaddr);
	prevslots = phys_slot(vr->length);
	assert(newslots >= prevslots);
	addedslots = newslots - prevslots;
	extralen = offset - limit;
	assert(extralen > 0);

	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	if(nextvr && nextvr->vaddr < offset) {
		printf("VM: can't grow into next region\n");
		return ENOMEM;
	}

	/* Without a resize hook, fill the gap with a fresh anonymous
	 * region instead of growing vr itself.
	 */
	if(!vr->def_memtype->ev_resize) {
		if(!map_page_region(vmp, limit, 0, extralen,
			VR_WRITABLE | VR_ANON,
			0, &mem_type_anon)) {
			printf("resize: couldn't put anon memory there\n");
			return ENOMEM;
		}
		return OK;
	}

	/* Grow the physblocks slot array; new slots start out unmapped. */
	if(!(newpr = realloc(vr->physblocks,
		newslots * sizeof(struct phys_region *)))) {
		printf("VM: map_region_extend_upto_v: realloc failed\n");
		return ENOMEM;
	}

	vr->physblocks = newpr;
	memset(vr->physblocks + prevslots, 0,
		addedslots * sizeof(struct phys_region *));

	/* Let the memory type update vr->length and its own state. */
	r = vr->def_memtype->ev_resize(vmp, vr, offset - vr->vaddr);

	return r;
}
1057 
1058 /*========================================================================*
1059  *				map_unmap_region	     	  	*
1060  *========================================================================*/
1061 int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
1062 	vir_bytes offset, vir_bytes len)
1063 {
1064 /* Shrink the region by 'len' bytes, from the start. Unreference
1065  * memory it used to reference if any.
1066  */
1067 	vir_bytes regionstart;
1068 	int freeslots = phys_slot(len);
1069 
1070 	SANITYCHECK(SCL_FUNCTIONS);
1071 
1072 	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
1073 		printf("VM: bogus length 0x%lx\n", len);
1074 		return EINVAL;
1075 	}
1076 
1077 	regionstart = r->vaddr + offset;
1078 
1079 	/* unreference its memory */
1080 	map_subfree(r, offset, len);
1081 
1082 	/* if unmap was at start/end of this region, it actually shrinks */
1083 	if(r->length == len) {
1084 		/* Whole region disappears. Unlink and free it. */
1085 		region_remove(&vmp->vm_regions_avl, r->vaddr);
1086 		map_free(r);
1087 	} else if(offset == 0) {
1088 		struct phys_region *pr;
1089 		vir_bytes voffset;
1090 		int remslots;
1091 
1092 		if(!r->def_memtype->ev_lowshrink) {
1093 			printf("VM: low-shrinking not implemented for %s\n",
1094 				r->def_memtype->name);
1095 			return EINVAL;
1096 		}
1097 
1098 		if(r->def_memtype->ev_lowshrink(r, len) != OK) {
1099 			printf("VM: low-shrinking failed for %s\n",
1100 				r->def_memtype->name);
1101 			return EINVAL;
1102 		}
1103 
1104 		region_remove(&vmp->vm_regions_avl, r->vaddr);
1105 
1106 		USE(r,
1107 		r->vaddr += len;);
1108 
1109 		remslots = phys_slot(r->length);
1110 
1111 		region_insert(&vmp->vm_regions_avl, r);
1112 
1113 		/* vaddr has increased; to make all the phys_regions
1114 		 * point to the same addresses, make them shrink by the
1115 		 * same amount.
1116 		 */
1117 		for(voffset = len; voffset < r->length;
1118 			voffset += VM_PAGE_SIZE) {
1119 			if(!(pr = physblock_get(r, voffset))) continue;
1120 			assert(pr->offset >= offset);
1121 			assert(pr->offset >= len);
1122 			USE(pr, pr->offset -= len;);
1123 		}
1124 		if(remslots)
1125 			memmove(r->physblocks, r->physblocks + freeslots,
1126 				remslots * sizeof(struct phys_region *));
1127 		USE(r, r->length -= len;);
1128 	} else if(offset + len == r->length) {
1129 		assert(len <= r->length);
1130 		r->length -= len;
1131 	}
1132 
1133 	SANITYCHECK(SCL_DETAIL);
1134 
1135 	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
1136 	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
1137 	    printf("VM: map_unmap_region: pt_writemap failed\n");
1138 	    return ENOMEM;
1139 	}
1140 
1141 	SANITYCHECK(SCL_FUNCTIONS);
1142 
1143 	return OK;
1144 }
1145 
1146 static int split_region(struct vmproc *vmp, struct vir_region *vr,
1147 	struct vir_region **vr1, struct vir_region **vr2, vir_bytes split_len)
1148 {
1149 	struct vir_region *r1 = NULL, *r2 = NULL;
1150 	vir_bytes rem_len = vr->length - split_len;
1151 	int slots1, slots2;
1152 	vir_bytes voffset;
1153 	int n1 = 0, n2 = 0;
1154 
1155 	assert(!(split_len % VM_PAGE_SIZE));
1156 	assert(!(rem_len % VM_PAGE_SIZE));
1157 	assert(!(vr->vaddr % VM_PAGE_SIZE));
1158 	assert(!(vr->length % VM_PAGE_SIZE));
1159 
1160 	if(!vr->def_memtype->ev_split) {
1161 		printf("VM: split region not implemented for %s\n",
1162 			vr->def_memtype->name);
1163 		sys_diagctl_stacktrace(vmp->vm_endpoint);
1164 		return EINVAL;
1165 	}
1166 
1167 	slots1 = phys_slot(split_len);
1168 	slots2 = phys_slot(rem_len);
1169 
1170 	if(!(r1 = region_new(vmp, vr->vaddr, split_len, vr->flags,
1171 		vr->def_memtype))) {
1172 		goto bail;
1173 	}
1174 
1175 	if(!(r2 = region_new(vmp, vr->vaddr+split_len, rem_len, vr->flags,
1176 		vr->def_memtype))) {
1177 		map_free(r1);
1178 		goto bail;
1179 	}
1180 
1181 	for(voffset = 0; voffset < r1->length; voffset += VM_PAGE_SIZE) {
1182 		struct phys_region *ph, *phn;
1183 		if(!(ph = physblock_get(vr, voffset))) continue;
1184 		if(!(phn = pb_reference(ph->ph, voffset, r1, ph->memtype)))
1185 			goto bail;
1186 		n1++;
1187 	}
1188 
1189 	for(voffset = 0; voffset < r2->length; voffset += VM_PAGE_SIZE) {
1190 		struct phys_region *ph, *phn;
1191 		if(!(ph = physblock_get(vr, split_len + voffset))) continue;
1192 		if(!(phn = pb_reference(ph->ph, voffset, r2, ph->memtype)))
1193 			goto bail;
1194 		n2++;
1195 	}
1196 
1197 	vr->def_memtype->ev_split(vmp, vr, r1, r2);
1198 
1199 	region_remove(&vmp->vm_regions_avl, vr->vaddr);
1200 	map_free(vr);
1201 	region_insert(&vmp->vm_regions_avl, r1);
1202 	region_insert(&vmp->vm_regions_avl, r2);
1203 
1204 	*vr1 = r1;
1205 	*vr2 = r2;
1206 
1207 	return OK;
1208 
1209   bail:
1210 	if(r1) map_free(r1);
1211 	if(r2) map_free(r2);
1212 
1213 	printf("split_region: failed\n");
1214 
1215 	return ENOMEM;
1216 }
1217 
1218 int map_unmap_range(struct vmproc *vmp, vir_bytes unmap_start, vir_bytes length)
1219 {
1220 	vir_bytes o = unmap_start % VM_PAGE_SIZE, unmap_limit;
1221 	region_iter v_iter;
1222 	struct vir_region *vr, *nextvr;
1223 
1224 	unmap_start -= o;
1225 	length += o;
1226 	length = roundup(length, VM_PAGE_SIZE);
1227 	unmap_limit = length + unmap_start;
1228 
1229 	if(length < VM_PAGE_SIZE) return EINVAL;
1230 	if(unmap_limit <= unmap_start) return EINVAL;
1231 
1232 	region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_LESS_EQUAL);
1233 
1234 	if(!(vr = region_get_iter(&v_iter))) {
1235 		region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_GREATER);
1236 		if(!(vr = region_get_iter(&v_iter))) {
1237 			return OK;
1238 		}
1239 	}
1240 
1241 	assert(vr);
1242 
1243 	for(; vr && vr->vaddr < unmap_limit; vr = nextvr) {
1244 		vir_bytes thislimit = vr->vaddr + vr->length;
1245 		vir_bytes this_unmap_start, this_unmap_limit;
1246 		vir_bytes remainlen;
1247 		int r;
1248 
1249 		region_incr_iter(&v_iter);
1250 		nextvr = region_get_iter(&v_iter);
1251 
1252 		assert(thislimit > vr->vaddr);
1253 
1254 		this_unmap_start = MAX(unmap_start, vr->vaddr);
1255 		this_unmap_limit = MIN(unmap_limit, thislimit);
1256 
1257 		if(this_unmap_start >= this_unmap_limit) continue;
1258 
1259 		if(this_unmap_start > vr->vaddr && this_unmap_limit < thislimit) {
1260 			struct vir_region *vr1, *vr2;
1261 			vir_bytes split_len = this_unmap_limit - vr->vaddr;
1262 			assert(split_len > 0);
1263 			assert(split_len < vr->length);
1264 			if((r=split_region(vmp, vr, &vr1, &vr2, split_len)) != OK) {
1265 				printf("VM: unmap split failed\n");
1266 				return r;
1267 			}
1268 			vr = vr1;
1269 			thislimit = vr->vaddr + vr->length;
1270 		}
1271 
1272 		remainlen = this_unmap_limit - vr->vaddr;
1273 
1274 		assert(this_unmap_start >= vr->vaddr);
1275 		assert(this_unmap_limit <= thislimit);
1276 		assert(remainlen > 0);
1277 
1278 		r = map_unmap_region(vmp, vr, this_unmap_start - vr->vaddr,
1279 			this_unmap_limit - this_unmap_start);
1280 
1281 		if(r != OK) {
1282 			printf("map_unmap_range: map_unmap_region failed\n");
1283 			return r;
1284 		}
1285 
1286 		region_start_iter(&vmp->vm_regions_avl, &v_iter, nextvr->vaddr, AVL_EQUAL);
1287 		assert(region_get_iter(&v_iter) == nextvr);
1288 	}
1289 
1290 	return OK;
1291 
1292 }
1293 
1294 /*========================================================================*
1295  *				map_get_phys				  *
1296  *========================================================================*/
1297 int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
1298 {
1299 	struct vir_region *vr;
1300 
1301 	if (!(vr = map_lookup(vmp, addr, NULL)) ||
1302 		(vr->vaddr != addr))
1303 		return EINVAL;
1304 
1305 	if (!vr->def_memtype->regionid)
1306 		return EINVAL;
1307 
1308 	if(r)
1309 		*r = vr->def_memtype->regionid(vr);
1310 
1311 	return OK;
1312 }
1313 
1314 /*========================================================================*
1315  *				map_get_ref				  *
1316  *========================================================================*/
1317 int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
1318 {
1319 	struct vir_region *vr;
1320 
1321 	if (!(vr = map_lookup(vmp, addr, NULL)) ||
1322 		(vr->vaddr != addr) || !vr->def_memtype->refcount)
1323 		return EINVAL;
1324 
1325 	if (cnt)
1326 		*cnt = vr->def_memtype->refcount(vr);
1327 
1328 	return OK;
1329 }
1330 
1331 void get_usage_info_kernel(struct vm_usage_info *vui)
1332 {
1333 	memset(vui, 0, sizeof(*vui));
1334 	vui->vui_total = kernel_boot_info.kernel_allocated_bytes +
1335 		kernel_boot_info.kernel_allocated_bytes_dynamic;
1336 }
1337 
1338 static void get_usage_info_vm(struct vm_usage_info *vui)
1339 {
1340 	memset(vui, 0, sizeof(*vui));
1341 	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
1342 		get_vm_self_pages() * VM_PAGE_SIZE;
1343 }
1344 
1345 /*========================================================================*
1346  *				get_usage_info				  *
1347  *========================================================================*/
1348 void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
1349 {
1350 	struct vir_region *vr;
1351 	struct phys_region *ph;
1352 	region_iter v_iter;
1353 	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
1354 	vir_bytes voffset;
1355 
1356 	memset(vui, 0, sizeof(*vui));
1357 
1358 	if(vmp->vm_endpoint == VM_PROC_NR) {
1359 		get_usage_info_vm(vui);
1360 		return;
1361 	}
1362 
1363 	if(vmp->vm_endpoint < 0) {
1364 		get_usage_info_kernel(vui);
1365 		return;
1366 	}
1367 
1368 	while((vr = region_get_iter(&v_iter))) {
1369 		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
1370 			if(!(ph = physblock_get(vr, voffset))) continue;
1371 			/* All present pages are counted towards the total. */
1372 			vui->vui_total += VM_PAGE_SIZE;
1373 
1374 			if (ph->ph->refcount > 1) {
1375 				/* Any page with a refcount > 1 is common. */
1376 				vui->vui_common += VM_PAGE_SIZE;
1377 
1378 				/* Any common, non-COW page is shared. */
1379 				if (vr->flags & VR_SHARED)
1380 					vui->vui_shared += VM_PAGE_SIZE;
1381 			}
1382 		}
1383 		region_incr_iter(&v_iter);
1384 	}
1385 }
1386 
1387 /*===========================================================================*
1388  *				get_region_info				     *
1389  *===========================================================================*/
1390 int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
1391 	int max, vir_bytes *nextp)
1392 {
1393 	struct vir_region *vr;
1394 	vir_bytes next;
1395 	int count;
1396 	region_iter v_iter;
1397 
1398 	next = *nextp;
1399 
1400 	if (!max) return 0;
1401 
1402 	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
1403 	if(!(vr = region_get_iter(&v_iter))) return 0;
1404 
1405 	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max;
1406 	   region_incr_iter(&v_iter)) {
1407 		struct phys_region *ph1 = NULL, *ph2 = NULL;
1408 		vir_bytes voffset;
1409 
1410 		/* where to start on next iteration, regardless of what we find now */
1411 		next = vr->vaddr + vr->length;
1412 
1413 		/* Report part of the region that's actually in use. */
1414 
1415 		/* Get first and last phys_regions, if any */
1416 		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
1417 			struct phys_region *ph;
1418 			if(!(ph = physblock_get(vr, voffset))) continue;
1419 			if(!ph1) ph1 = ph;
1420 			ph2 = ph;
1421 		}
1422 
1423 		if(!ph1 || !ph2) {
1424 			printf("skipping empty region 0x%lx-0x%lx\n",
1425 				vr->vaddr, vr->vaddr+vr->length);
1426 			continue;
1427 		}
1428 
1429 		/* Report start+length of region starting from lowest use. */
1430 		vri->vri_addr = vr->vaddr + ph1->offset;
1431 		vri->vri_prot = PROT_READ;
1432 		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;
1433 
1434 		/* "AND" the provided protection with per-page protection. */
1435 		if (vr->flags & VR_WRITABLE)
1436 			vri->vri_prot |= PROT_WRITE;
1437 		count++;
1438 		vri++;
1439 	}
1440 
1441 	*nextp = next;
1442 	return count;
1443 }
1444 
1445 /*========================================================================*
1446  *				regionprintstats			  *
1447  *========================================================================*/
1448 void printregionstats(struct vmproc *vmp)
1449 {
1450 	struct vir_region *vr;
1451 	struct phys_region *pr;
1452 	vir_bytes used = 0, weighted = 0;
1453 	region_iter v_iter;
1454 	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
1455 
1456 	while((vr = region_get_iter(&v_iter))) {
1457 		vir_bytes voffset;
1458 		region_incr_iter(&v_iter);
1459 		if(vr->flags & VR_DIRECT)
1460 			continue;
1461 		for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
1462 			if(!(pr = physblock_get(vr, voffset))) continue;
1463 			used += VM_PAGE_SIZE;
1464 			weighted += VM_PAGE_SIZE / pr->ph->refcount;
1465 		}
1466 	}
1467 
1468 	printf("%6lukB  %6lukB\n", used/1024, weighted/1024);
1469 
1470 	return;
1471 }
1472 
1473 void map_setparent(struct vmproc *vmp)
1474 {
1475 	region_iter iter;
1476 	struct vir_region *vr;
1477         region_start_iter_least(&vmp->vm_regions_avl, &iter);
1478         while((vr = region_get_iter(&iter))) {
1479                 USE(vr, vr->parent = vmp;);
1480                 region_incr_iter(&iter);
1481         }
1482 }
1483 
1484 unsigned int physregions(struct vir_region *vr)
1485 {
1486 	unsigned int n =  0;
1487 	vir_bytes voffset;
1488 	for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
1489 		if(physblock_get(vr, voffset))
1490 			n++;
1491 	}
1492 	return n;
1493 }
1494