1 /*-
2  * Copyright (c) 1991 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department and William Jolitz of UUNET Technologies Inc.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	7.11 (Berkeley) 05/11/92
12  */
13 
14 /*
15  * Derived from hp300 version by Mike Hibler, this version by William
16  * Jolitz uses a recursive map [a pde points to the page directory] to
17  * map the page tables using the page tables themselves.  This is done to
18  * reduce the impact of large, sparse address spaces on kernel virtual
19  * memory, and to reduce the memory cost to each process.
20  *
21  *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
22  */
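
/*
 * A minimal sketch of what the recursive slot buys us, assuming the
 * PTDPTDI, PD_SHIFT and PG_SHIFT constants and the vtopte()/PTmap
 * definitions from the machine-dependent pmap header: because one pde
 * points back at the page directory itself, the pte for any virtual
 * address can be found with plain address arithmetic instead of a walk
 * of physical pages.  (Hypothetical helper; the real lookup is vtopte().)
 */
#ifdef notdef
static struct pte *
example_vtopte(va)
	vm_offset_t va;
{
	/* base of the recursively mapped page table window */
	vm_offset_t ptbase = (vm_offset_t)PTDPTDI << PD_SHIFT;

	/* one 4-byte pte per virtual page */
	return ((struct pte *)(ptbase + (va >> PG_SHIFT) * sizeof(struct pte)));
}
#endif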
23 
24 /*
25  *	Reno i386 version, from Mike Hibler's hp300 version.
26  */
27 
28 /*
29  *	Manages physical address maps.
30  *
31  *	In addition to hardware address maps, this
32  *	module is called upon to provide software-use-only
33  *	maps which may or may not be stored in the same
34  *	form as hardware maps.  These pseudo-maps are
35  *	used to store intermediate results from copy
36  *	operations to and from address spaces.
37  *
38  *	Since the information managed by this module is
39  *	also stored by the logical address mapping module,
40  *	this module may throw away valid virtual-to-physical
41  *	mappings at almost any time.  However, invalidations
42  *	of virtual-to-physical mappings must be done as
43  *	requested.
44  *
45  *	In order to cope with hardware architectures which
46  *	make virtual-to-physical map invalidates expensive,
47  *	this module may delay invalidate or reduced protection
48  *	operations until such time as they are actually
49  *	necessary.  This module is given full information as
50  *	to which processors are currently using which maps,
51  *	and to when physical maps must be made correct.
52  */
53 
54 #include "param.h"
55 #include "proc.h"
56 #include "malloc.h"
57 #include "user.h"
58 
59 #include "vm/vm.h"
60 #include "vm/vm_kern.h"
61 #include "vm/vm_page.h"
62 /*#include "vm/vm_pageout.h"*/
63 
64 /*#include "machine/isa.h"*/
65 
66 /*
67  * Allocate various and sundry SYSMAPs used in the days of old VM
68  * and not yet converted.  XXX.
69  */
70 #define BSDVM_COMPAT	1
71 
72 #ifdef DEBUG
73 struct {
74 	int kernel;	/* entering kernel mapping */
75 	int user;	/* entering user mapping */
76 	int ptpneeded;	/* needed to allocate a PT page */
77 	int pwchange;	/* no mapping change, just wiring or protection */
78 	int wchange;	/* no mapping change, just wiring */
79 	int mchange;	/* was mapped but mapping to different page */
80 	int managed;	/* a managed page */
81 	int firstpv;	/* first mapping for this PA */
82 	int secondpv;	/* second mapping for this PA */
83 	int ci;		/* cache inhibited */
84 	int unmanaged;	/* not a managed page */
85 	int flushes;	/* cache flushes */
86 } enter_stats;
87 struct {
88 	int calls;
89 	int removes;
90 	int pvfirst;
91 	int pvsearch;
92 	int ptinvalid;
93 	int uflushes;
94 	int sflushes;
95 } remove_stats;
96 
97 int debugmap = 0;
98 int pmapdebug = 0;
99 #define PDB_FOLLOW	0x0001
100 #define PDB_INIT	0x0002
101 #define PDB_ENTER	0x0004
102 #define PDB_REMOVE	0x0008
103 #define PDB_CREATE	0x0010
104 #define PDB_PTPAGE	0x0020
105 #define PDB_CACHE	0x0040
106 #define PDB_BITS	0x0080
107 #define PDB_COLLECT	0x0100
108 #define PDB_PROTECT	0x0200
109 #define PDB_PDRTAB	0x0400
110 #define PDB_PARANOIA	0x2000
111 #define PDB_WIRING	0x4000
112 #define PDB_PVDUMP	0x8000
113 
114 int pmapvacflush = 0;
115 #define	PVF_ENTER	0x01
116 #define	PVF_REMOVE	0x02
117 #define	PVF_PROTECT	0x04
118 #define	PVF_TOTAL	0x80
119 #endif
120 
121 /*
122  * Get PDEs and PTEs for user/kernel address space
123  */
124 #define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
125 
126 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
127 
128 #define pmap_pde_v(pte)		((pte)->pd_v)
129 #define pmap_pte_w(pte)		((pte)->pg_w)
130 /* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
131 #define pmap_pte_m(pte)		((pte)->pg_m)
132 #define pmap_pte_u(pte)		((pte)->pg_u)
133 #define pmap_pte_v(pte)		((pte)->pg_v)
134 #define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
135 #define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
136 
137 /*
138  * Given a map and a machine independent protection code,
139  * convert to an i386 protection code.
140  */
141 #define pte_prot(m, p)	(protection_codes[p])
142 int	protection_codes[8];
143 
144 struct pmap	kernel_pmap_store;
145 
146 vm_offset_t    	avail_start;	/* PA of first available physical page */
147 vm_offset_t	avail_end;	/* PA of last available physical page */
148 vm_size_t	mem_size;	/* memory size in bytes */
149 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
150 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
151 vm_offset_t	vm_first_phys;	/* PA of first managed page */
152 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
153 int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
154 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
155 char		*pmap_attributes;	/* reference and modify bits */
156 
157 boolean_t	pmap_testbit();
158 void		pmap_clear_modify();
159 
160 #if BSDVM_COMPAT
161 #include "msgbuf.h"
162 
163 /*
164  * All those kernel PT submaps that BSD is so fond of
165  */
166 struct pte	*CMAP1, *CMAP2, *mmap;
167 caddr_t		CADDR1, CADDR2, vmmap;
168 struct pte	*msgbufmap;
169 struct msgbuf	*msgbufp;
170 #endif
171 
172 void pmap_activate __P((pmap_t, struct pcb *));
173 
174 /*
175  *	Bootstrap the system enough to run with virtual memory.
176  *	Map the kernel's code and data, and allocate the system page table.
177  *
178  *	On the I386 this is called after mapping has already been enabled
179  *	and just syncs the pmap module with what has already been done.
180  *	[We can't call it easily with mapping off since the kernel is not
181  *	mapped with PA == VA, hence we would have to relocate every address
182  *	from the linked base (virtual) address 0xFE000000 to the actual
183  *	(physical) address starting relative to 0]
184  */
185 struct pte *pmap_pte();
186 
187 extern vm_offset_t	atdevbase;
188 void
189 pmap_bootstrap(firstaddr, loadaddr)
190 	vm_offset_t firstaddr;
191 	vm_offset_t loadaddr;
192 {
193 #if BSDVM_COMPAT
194 	vm_offset_t va;
195 	struct pte *pte;
196 #endif
197 	extern vm_offset_t maxmem, physmem;
198 	extern int	IdlePTD;
199 
200 
201 /* disable paging in basemem for all machines until the cryptic comment
202  * below can be explained
203  */
204 #if 1 ||	defined(ODYSSEUS) || defined(ARGO) || defined(CIRCE)
205 	firstaddr = 0x100000;	/* for some reason, basemem screws up on this machine */
206 #endif
207 	printf("ps %x pe %x ", firstaddr, maxmem << PG_SHIFT);
208 	avail_start = firstaddr;
209 	avail_end = maxmem << PG_SHIFT;
210 
211 	/* XXX: allow for msgbuf */
212 	avail_end -= i386_round_page(sizeof(struct msgbuf));
213 
214 	mem_size = physmem << PG_SHIFT;
215 	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
216 	virtual_end = VM_MAX_KERNEL_ADDRESS;
217 	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
218 
219 	/*
220 	 * Initialize protection array.
221 	 */
222 	i386_protection_init();
223 
224 #ifdef notdef
225 	/*
226 	 * Create Kernel page directory table and page maps.
227 	 * [ currently done in locore. i have wild and crazy ideas -wfj ]
228 	 */
229 	bzero(firstaddr, 4*NBPG);
230 	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
231 	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
232 
233 	firstaddr += NBPG;
234 	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
235 		x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
236 			struct pde *pde;
237 		pde = kernel_pmap->pm_pdir + x;
238 		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
239 	}
240 #else
241 	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
242 #endif
243 
244 
245 	simple_lock_init(&kernel_pmap->pm_lock);
246 	kernel_pmap->pm_count = 1;
247 
248 #if BSDVM_COMPAT
249 	/*
250 	 * Allocate all the submaps we need
251 	 */
252 #define	SYSMAP(c, p, v, n)	\
253 	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
254 
255 	va = virtual_avail;
256 	pte = pmap_pte(kernel_pmap, va);
257 
258 	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
259 	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
260 	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
261 	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
262 	virtual_avail = va;
263 #endif
264 
265 	/**(int *)PTD = 0;
266 	load_cr3(rcr3());*/
267 
268 }
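
/*
 * A sketch of how a CMAP/CADDR pair reserved by the SYSMAP() block above
 * is intended to be used (hypothetical helper; PG_FRAME, PG_V, PG_KW and
 * tlbflush() are taken from the i386 pte and cpu support headers): point
 * the reserved pte at an arbitrary physical page, flush the TLB, and that
 * page becomes temporarily addressable at the matching CADDR address.
 */
#ifdef notdef
static void
example_zero_phys(pa)
	vm_offset_t pa;
{
	*(int *)CMAP1 = (pa & PG_FRAME) | PG_V | PG_KW;
	tlbflush();
	bzero(CADDR1, NBPG);		/* CADDR1 now refers to page pa */
	*(int *)CMAP1 = 0;		/* and is torn down again here */
	tlbflush();
}
#endif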
269 
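/*
 * A physical page is considered valid here when it lies outside the
 * ISA hole: either in base memory below 640K (0xa0000) or in extended
 * memory at or above 1M (0x100000).
 */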
270 pmap_isvalidphys(addr) {
271 	if (addr < 0xa0000) return (1);
272 	if (addr >= 0x100000) return (1);
273 	return(0);
274 }
275 
276 /*
277  * Bootstrap memory allocator. This function allows for early dynamic
278  * memory allocation until the virtual memory system has been bootstrapped.
279  * After that point, either kmem_alloc or malloc should be used. This
280  * function works by stealing pages from the (to be) managed page pool,
281  * stealing virtual address space, then mapping the pages and zeroing them.
282  *
283  * It should be used from pmap_bootstrap until vm_page_startup; after that
284  * it cannot be used and will generate a panic if tried.  Note that this
285  * memory will never be freed, and in essence it is wired down.
286  */
287 void *
288 pmap_bootstrap_alloc(size) {
289 	vm_offset_t val;
290 	int i;
291 	extern boolean_t vm_page_startup_initialized;
292 
293 	if (vm_page_startup_initialized)
294 		panic("pmap_bootstrap_alloc: called after startup initialized");
295 	size = round_page(size);
296 	val = virtual_avail;
297 
298 	/* deal with "hole incursion" */
299 	for (i = 0; i < size; i += PAGE_SIZE) {
300 
301 		while (!pmap_isvalidphys(avail_start))
302 				avail_start += PAGE_SIZE;
303 
304 		virtual_avail = pmap_map(virtual_avail, avail_start,
305 			avail_start + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
306 		avail_start += PAGE_SIZE;
307 	}
308 
309 	blkclr ((caddr_t) val, size);
310 	return ((void *) val);
311 }
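
/*
 * Usage sketch (hypothetical caller): early machine-dependent code that
 * needs a table before vm_page_startup() runs can steal zeroed, wired
 * pages here; the size is rounded up to whole pages and the memory is
 * never returned.
 */
#ifdef notdef
static caddr_t example_early_table;

static void
example_early_alloc()
{
	example_early_table = (caddr_t)pmap_bootstrap_alloc(64 * 1024);
}
#endif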
312 
313 /*
314  *	Initialize the pmap module.
315  *	Called by vm_init, to initialize any structures that the pmap
316  *	system needs to map virtual memory.
317  */
318 void
319 pmap_init(phys_start, phys_end)
320 	vm_offset_t	phys_start, phys_end;
321 {
322 	vm_offset_t	addr, addr2;
323 	vm_size_t	npg, s;
324 	int		rv;
325 	extern int KPTphys;
326 
327 #ifdef DEBUG
328 	if (pmapdebug & PDB_FOLLOW)
329 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
330 #endif
331 	/*
332 	 * Now that kernel map has been allocated, we can mark as
333 	 * unavailable regions which we have mapped in locore.
334 	 */
335 	addr = atdevbase;
336 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
337 			   &addr, (0x100000-0xa0000), FALSE);
338 
339 	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
340 	vm_object_reference(kernel_object);
341 	(void) vm_map_find(kernel_map, kernel_object, addr,
342 			   &addr, 2*NBPG, FALSE);
343 
344 	/*
345 	 * Allocate memory for random pmap data structures.  Includes the
346 	 * pv_head_table and pmap_attributes.
347 	 */
348 	npg = atop(phys_end - phys_start);
349 	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
350 	s = round_page(s);
351 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
352 	pv_table = (pv_entry_t) addr;
353 	addr += sizeof(struct pv_entry) * npg;
354 	pmap_attributes = (char *) addr;
355 #ifdef DEBUG
356 	if (pmapdebug & PDB_INIT)
357 		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
358 		       s, npg, pv_table, pmap_attributes);
359 #endif
360 
361 	/*
362 	 * Now it is safe to enable pv_table recording.
363 	 */
364 	vm_first_phys = phys_start;
365 	vm_last_phys = phys_end;
366 	pmap_initialized = TRUE;
367 }
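
/*
 * The pv_table allocated above holds one struct pv_entry per managed
 * physical page; pa_index(pa) selects the slot and pa_to_pvh(pa) yields
 * the list head that pmap_enter() and pmap_remove() below chain extra
 * mappings onto.  pmap_attributes[] shadows the referenced/modified bits
 * for pages whose mappings have been torn down.
 */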
368 
369 /*
370  *	Used to map a range of physical addresses into kernel
371  *	virtual address space.
372  *
373  *	For now, VM is already on, we only need to map the
374  *	specified memory.
375  */
376 vm_offset_t
377 pmap_map(virt, start, end, prot)
378 	vm_offset_t	virt;
379 	vm_offset_t	start;
380 	vm_offset_t	end;
381 	int		prot;
382 {
383 #ifdef DEBUG
384 	if (pmapdebug & PDB_FOLLOW)
385 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
386 #endif
387 	while (start < end) {
388 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
389 		virt += PAGE_SIZE;
390 		start += PAGE_SIZE;
391 	}
392 	return(virt);
393 }
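
/*
 * Usage sketch (hypothetical addresses): pmap_map() is convenient at boot
 * for wiring a physical range, such as device memory, into kernel virtual
 * space one page at a time; it returns the first virtual address past the
 * new mapping.
 */
#ifdef notdef
static vm_offset_t
example_map_device(va)
	vm_offset_t va;
{
	/* map the 64K at physical 0xa0000 read/write */
	return (pmap_map(va, (vm_offset_t)0xa0000, (vm_offset_t)0xb0000,
		VM_PROT_READ|VM_PROT_WRITE));
}
#endif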
394 
395 /*
396  *	Create and return a physical map.
397  *
398  *	If the size specified for the map
399  *	is zero, the map is an actual physical
400  *	map, and may be referenced by the
401  *	hardware.
402  *
403  *	If the size specified is non-zero,
404  *	the map will be used in software only, and
405  *	is bounded by that size.
406  *
407  * [ just allocate a ptd and mark it uninitialized -- should we track
408  *   with a table which process has which ptd? -wfj ]
409  */
410 
411 pmap_t
412 pmap_create(size)
413 	vm_size_t	size;
414 {
415 	register pmap_t pmap;
416 
417 #ifdef DEBUG
418 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
419 		printf("pmap_create(%x)\n", size);
420 #endif
421 	/*
422 	 * Software use map does not need a pmap
423 	 */
424 	if (size)
425 		return(NULL);
426 
427 	/* XXX: is it ok to wait here? */
428 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
429 #ifdef notifwewait
430 	if (pmap == NULL)
431 		panic("pmap_create: cannot allocate a pmap");
432 #endif
433 	bzero(pmap, sizeof(*pmap));
434 	pmap_pinit(pmap);
435 	return (pmap);
436 }
437 
438 /*
439  * Initialize a preallocated and zeroed pmap structure,
440  * such as one in a vmspace structure.
441  */
442 void
443 pmap_pinit(pmap)
444 	register struct pmap *pmap;
445 {
446 
447 #ifdef DEBUG
448 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
449 		pg("pmap_pinit(%x)\n", pmap);
450 #endif
451 
452 	/*
453 	 * No need to allocate page table space yet but we do need a
454 	 * valid page directory table.
455 	 */
456 	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
457 
458 	/* wire in kernel global address entries */
459 	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
460 		(KPTDI_LAST-KPTDI_FIRST+1)*4);
461 
462 	/* install self-referential address mapping entry */
463 	*(int *)(pmap->pm_pdir+PTDPTDI) =
464 		(int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_URKW;
465 
466 	pmap->pm_count = 1;
467 	simple_lock_init(&pmap->pm_lock);
468 }
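
/*
 * Note on the self-referential entry installed above: once this page
 * directory is loaded into %cr3, the pde at index PTDPTDI maps the
 * directory itself, so the pmap's own page tables appear at the fixed
 * recursive window that pmap_pte() below reaches through vtopte() and
 * avtopte().
 */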
469 
470 /*
471  *	Retire the given physical map from service.
472  *	Should only be called if the map contains
473  *	no valid mappings.
474  */
475 void
476 pmap_destroy(pmap)
477 	register pmap_t pmap;
478 {
479 	int count;
480 
481 #ifdef DEBUG
482 	if (pmapdebug & PDB_FOLLOW)
483 		printf("pmap_destroy(%x)\n", pmap);
484 #endif
485 	if (pmap == NULL)
486 		return;
487 
488 	simple_lock(&pmap->pm_lock);
489 	count = --pmap->pm_count;
490 	simple_unlock(&pmap->pm_lock);
491 	if (count == 0) {
492 		pmap_release(pmap);
493 		free((caddr_t)pmap, M_VMPMAP);
494 	}
495 }
496 
497 /*
498  * Release any resources held by the given physical map.
499  * Called when a pmap initialized by pmap_pinit is being released.
500  * Should only be called if the map contains no valid mappings.
501  */
502 void
503 pmap_release(pmap)
504 	register struct pmap *pmap;
505 {
506 
507 #ifdef DEBUG
508 	if (pmapdebug & PDB_FOLLOW)
509 		pg("pmap_release(%x)\n", pmap);
510 #endif
511 #ifdef notdef /* DIAGNOSTIC */
512 	/* count would be 0 from pmap_destroy... */
513 	simple_lock(&pmap->pm_lock);
514 	if (pmap->pm_count != 1)
515 		panic("pmap_release count");
516 #endif
517 	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
518 }
519 
520 /*
521  *	Add a reference to the specified pmap.
522  */
523 void
524 pmap_reference(pmap)
525 	pmap_t	pmap;
526 {
527 #ifdef DEBUG
528 	if (pmapdebug & PDB_FOLLOW)
529 		pg("pmap_reference(%x)", pmap);
530 #endif
531 	if (pmap != NULL) {
532 		simple_lock(&pmap->pm_lock);
533 		pmap->pm_count++;
534 		simple_unlock(&pmap->pm_lock);
535 	}
536 }
537 
538 /*
539  *	Remove the given range of addresses from the specified map.
540  *
541  *	It is assumed that the start and end are properly
542  *	rounded to the page size.
543  */
544 void
545 pmap_remove(pmap, sva, eva)
546 	register struct pmap *pmap;
547 	vm_offset_t sva, eva;
548 {
549 	register vm_offset_t pa, va;
550 	register pt_entry_t *pte;
551 	register pv_entry_t pv, npv;
552 	register int ix;
553 	pmap_t ptpmap;
554 	int *pde, s, bits;
555 	boolean_t firstpage = TRUE;
556 	boolean_t flushcache = FALSE;
557 #ifdef DEBUG
558 	pt_entry_t opte;
559 
560 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
561 		printf("pmap_remove(%x, %x, %x)", pmap, sva, eva);
562 	if (eva >= USRSTACK && eva <= UPT_MAX_ADDRESS)
563 		nullop();
564 #endif
565 
566 	if (pmap == NULL)
567 		return;
568 
569 #ifdef DEBUG
570 	remove_stats.calls++;
571 #endif
572 	for (va = sva; va < eva; va += PAGE_SIZE) {
573 		/*
574 		 * Weed out invalid mappings.
575 		 * Note: we assume that the page directory table is
576 		 * always allocated, and in kernel virtual.
577 		 */
578 		if (!pmap_pde_v(pmap_pde(pmap, va)))
579 			continue;
580 
581 		pte = pmap_pte(pmap, va);
582 		if (pte == 0)
583 			continue;
584 		pa = pmap_pte_pa(pte);
585 		if (pa == 0)
586 			continue;
587 #ifdef DEBUG
588 		opte = *pte;
589 		remove_stats.removes++;
590 #endif
591 		/*
592 		 * Update statistics
593 		 */
594 		if (pmap_pte_w(pte))
595 			pmap->pm_stats.wired_count--;
596 		pmap->pm_stats.resident_count--;
597 
598 		/*
599 		 * Invalidate the PTEs.
600 		 * XXX: should cluster them up and invalidate as many
601 		 * as possible at once.
602 		 */
603 #ifdef DEBUG
604 		if (pmapdebug & PDB_REMOVE)
605 			printf("remove: inv %x ptes at %x(%x) ",
606 			       i386pagesperpage, pte, *(int *)pte);
607 #endif
608 		bits = ix = 0;
609 		do {
610 			bits |= *(int *)pte & (PG_U|PG_M);
611 			*(int *)pte++ = 0;
612 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
613 		} while (++ix != i386pagesperpage);
614 		if (pmap == &curproc->p_vmspace->vm_pmap)
615 			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
616 		/* are we current address space or kernel? */
617 		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
618 			|| pmap == kernel_pmap)
619 		load_cr3(curpcb->pcb_ptd);*/
620 		tlbflush();
621 
622 #ifdef needednotdone
623 reduce wiring count on page table pages as references drop
624 #endif
625 
626 		/*
627 		 * Remove from the PV table (raise IPL since we
628 		 * may be called at interrupt time).
629 		 */
630 		if (pa < vm_first_phys || pa >= vm_last_phys)
631 			continue;
632 		pv = pa_to_pvh(pa);
633 		s = splimp();
634 		/*
635 		 * If it is the first entry on the list, it is actually
636 		 * in the header and we must copy the following entry up
637 		 * to the header.  Otherwise we must search the list for
638 		 * the entry.  In either case we free the now unused entry.
639 		 */
640 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
641 			npv = pv->pv_next;
642 			if (npv) {
643 				*pv = *npv;
644 				free((caddr_t)npv, M_VMPVENT);
645 			} else
646 				pv->pv_pmap = NULL;
647 #ifdef DEBUG
648 			remove_stats.pvfirst++;
649 #endif
650 		} else {
651 			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
652 #ifdef DEBUG
653 				remove_stats.pvsearch++;
654 #endif
655 				if (pmap == npv->pv_pmap && va == npv->pv_va)
656 					break;
657 				pv = npv;
658 			}
659 #ifdef DEBUG
660 			if (npv == NULL)
661 				panic("pmap_remove: PA not in pv_tab");
662 #endif
663 			pv->pv_next = npv->pv_next;
664 			free((caddr_t)npv, M_VMPVENT);
665 			pv = pa_to_pvh(pa);
666 		}
667 
668 #ifdef notdef
669 [tally number of pagetable pages, if sharing of ptpages adjust here]
670 #endif
671 		/*
672 		 * Update saved attributes for managed page
673 		 */
674 		pmap_attributes[pa_index(pa)] |= bits;
675 		splx(s);
676 	}
677 #ifdef notdef
678 [cache and tlb flushing, if needed]
679 #endif
680 }
681 
682 /*
683  *	Routine:	pmap_remove_all
684  *	Function:
685  *		Removes this physical page from
686  *		all physical maps in which it resides.
687  *		Reflects back modify bits to the pager.
688  */
689 void
690 pmap_remove_all(pa)
691 	vm_offset_t pa;
692 {
693 	register pv_entry_t pv;
694 	int s;
695 
696 #ifdef DEBUG
697 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
698 		printf("pmap_remove_all(%x)", pa);
699 	/*pmap_pvdump(pa);*/
700 #endif
701 	/*
702 	 * Not one of ours
703 	 */
704 	if (pa < vm_first_phys || pa >= vm_last_phys)
705 		return;
706 
707 	pv = pa_to_pvh(pa);
708 	s = splimp();
709 	/*
710 	 * Do it the easy way for now
711 	 */
712 	while (pv->pv_pmap != NULL) {
713 #ifdef DEBUG
714 		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
715 		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
716 			panic("pmap_remove_all: bad mapping");
717 #endif
718 		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
719 	}
720 	splx(s);
721 }
722 
723 /*
724  *	Routine:	pmap_copy_on_write
725  *	Function:
726  *		Remove write privileges from all
727  *		physical maps for this physical page.
728  */
729 void
730 pmap_copy_on_write(pa)
731 	vm_offset_t pa;
732 {
733 #ifdef DEBUG
734 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
735 		printf("pmap_copy_on_write(%x)", pa);
736 #endif
737 	pmap_changebit(pa, PG_RO, TRUE);
738 }
739 
740 /*
741  *	Set the physical protection on the
742  *	specified range of this map as requested.
743  */
744 void
745 pmap_protect(pmap, sva, eva, prot)
746 	register pmap_t	pmap;
747 	vm_offset_t	sva, eva;
748 	vm_prot_t	prot;
749 {
750 	register pt_entry_t *pte;
751 	register vm_offset_t va;
752 	register int ix;
753 	int i386prot;
754 	boolean_t firstpage = TRUE;
755 
756 #ifdef DEBUG
757 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
758 		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
759 #endif
760 	if (pmap == NULL)
761 		return;
762 
763 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
764 		pmap_remove(pmap, sva, eva);
765 		return;
766 	}
767 	if (prot & VM_PROT_WRITE)
768 		return;
769 
770 	for (va = sva; va < eva; va += PAGE_SIZE) {
771 		/*
772 		 * Page table page is not allocated.
773 		 * Skip it, we don't want to force allocation
774 		 * of unnecessary PTE pages just to set the protection.
775 		 */
776 		if (!pmap_pde_v(pmap_pde(pmap, va))) {
777 			/* XXX: avoid address wrap around */
778 			if (va >= i386_trunc_pdr((vm_offset_t)-1))
779 				break;
780 			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
781 			continue;
782 		} else	pte = pmap_pte(pmap, va);
783 
784 		/*
785 		 * Page not valid.  Again, skip it.
786 		 * Should we do this?  Or set protection anyway?
787 		 */
788 		if (!pmap_pte_v(pte))
789 			continue;
790 
791 		ix = 0;
792 		i386prot = pte_prot(pmap, prot);
793 		if(va < UPT_MAX_ADDRESS)
794 			i386prot |= 2 /*PG_u*/;
795 		do {
796 			/* clear VAC here if PG_RO? */
797 			pmap_pte_set_prot(pte++, i386prot);
798 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
799 		} while (++ix != i386pagesperpage);
800 	}
801 out:
802 	if (pmap == &curproc->p_vmspace->vm_pmap)
803 		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
804 }
805 
806 /*
807  *	Insert the given physical page (p) at
808  *	the specified virtual address (v) in the
809  *	target physical map with the protection requested.
810  *
811  *	If specified, the page will be wired down, meaning
812  *	that the related pte can not be reclaimed.
813  *
814  *	NB:  This is the only routine which MAY NOT lazy-evaluate
815  *	or lose information.  That is, this routine must actually
816  *	insert this page into the given map NOW.
817  */
818 void
819 pmap_enter(pmap, va, pa, prot, wired)
820 	register pmap_t pmap;
821 	vm_offset_t va;
822 	register vm_offset_t pa;
823 	vm_prot_t prot;
824 	boolean_t wired;
825 {
826 	register pt_entry_t *pte;
827 	register int npte, ix;
828 	vm_offset_t opa;
829 	boolean_t cacheable = TRUE;
830 	boolean_t checkpv = TRUE;
831 
832 #ifdef DEBUG
833 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
834 		printf("pmap_enter(%x, %x, %x, %x, %x)",
835 		       pmap, va, pa, prot, wired);
836 	if(!pmap_isvalidphys(pa)) panic("invalid phys");
837 #endif
838 	if (pmap == NULL)
839 		return;
840 
841 	if(va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig");
842 	/* also, should not muck with PTD va! */
843 
844 #ifdef DEBUG
845 	if (pmap == kernel_pmap)
846 		enter_stats.kernel++;
847 	else
848 		enter_stats.user++;
849 #endif
850 
851 	/*
852 	 * Page Directory table entry not valid, we need a new PT page
853 	 */
854 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
855 		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
856 	}
857 
858 	pte = pmap_pte(pmap, va);
859 	opa = pmap_pte_pa(pte);
860 #ifdef DEBUG
861 	if (pmapdebug & PDB_ENTER)
862 		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
863 #endif
864 
865 	/*
866 	 * Mapping has not changed, must be protection or wiring change.
867 	 */
868 	if (opa == pa) {
869 #ifdef DEBUG
870 		enter_stats.pwchange++;
871 #endif
872 		/*
873 		 * Wiring change, just update stats.
874 		 * We don't worry about wiring PT pages as they remain
875 		 * resident as long as there are valid mappings in them.
876 		 * Hence, if a user page is wired, the PT page will be also.
877 		 */
878 		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
879 #ifdef DEBUG
880 			if (pmapdebug & PDB_ENTER)
881 				pg("enter: wiring change -> %x ", wired);
882 #endif
883 			if (wired)
884 				pmap->pm_stats.wired_count++;
885 			else
886 				pmap->pm_stats.wired_count--;
887 #ifdef DEBUG
888 			enter_stats.wchange++;
889 #endif
890 		}
891 		goto validate;
892 	}
893 
894 	/*
895 	 * Mapping has changed, invalidate old range and fall through to
896 	 * handle validating new mapping.
897 	 */
898 	if (opa) {
899 #ifdef DEBUG
900 		if (pmapdebug & PDB_ENTER)
901 			printf("enter: removing old mapping %x pa %x ", va, opa);
902 #endif
903 		pmap_remove(pmap, va, va + PAGE_SIZE);
904 #ifdef DEBUG
905 		enter_stats.mchange++;
906 #endif
907 	}
908 
909 	/*
910 	 * Enter on the PV list if part of our managed memory
911 	 * Note that we raise IPL while manipulating pv_table
912 	 * since pmap_enter can be called at interrupt time.
913 	 */
914 	if (pa >= vm_first_phys && pa < vm_last_phys) {
915 		register pv_entry_t pv, npv;
916 		int s;
917 
918 #ifdef DEBUG
919 		enter_stats.managed++;
920 #endif
921 		pv = pa_to_pvh(pa);
922 		s = splimp();
923 #ifdef DEBUG
924 		if (pmapdebug & PDB_ENTER)
925 			printf("enter: pv at %x: %x/%x/%x ",
926 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
927 #endif
928 		/*
929 		 * No entries yet, use header as the first entry
930 		 */
931 		if (pv->pv_pmap == NULL) {
932 #ifdef DEBUG
933 			enter_stats.firstpv++;
934 #endif
935 			pv->pv_va = va;
936 			pv->pv_pmap = pmap;
937 			pv->pv_next = NULL;
938 			pv->pv_flags = 0;
939 		}
940 		/*
941 		 * There is at least one other VA mapping this page.
942 		 * Place this entry after the header.
943 		 */
944 		else {
945 			/*printf("second time: ");*/
946 #ifdef DEBUG
947 			for (npv = pv; npv; npv = npv->pv_next)
948 				if (pmap == npv->pv_pmap && va == npv->pv_va)
949 					panic("pmap_enter: already in pv_tab");
950 #endif
951 			npv = (pv_entry_t)
952 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
953 			npv->pv_va = va;
954 			npv->pv_pmap = pmap;
955 			npv->pv_next = pv->pv_next;
956 			pv->pv_next = npv;
957 #ifdef DEBUG
958 			if (!npv->pv_next)
959 				enter_stats.secondpv++;
960 #endif
961 		}
962 		splx(s);
963 	}
964 	/*
965 	 * Assumption: if it is not part of our managed memory
966 	 * then it must be device memory which may be volatile.
967 	 */
968 	if (pmap_initialized) {
969 		checkpv = cacheable = FALSE;
970 #ifdef DEBUG
971 		enter_stats.unmanaged++;
972 #endif
973 	}
974 
975 	/*
976 	 * Increment counters
977 	 */
978 	pmap->pm_stats.resident_count++;
979 	if (wired)
980 		pmap->pm_stats.wired_count++;
981 
982 validate:
983 	/*
984 	 * Now validate mapping with desired protection/wiring.
985 	 * Assume uniform modified and referenced status for all
986 	 * I386 pages in a MACH page.
987 	 */
988 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
989 	npte |= (*(int *)pte & (PG_M|PG_U));
990 	if (wired)
991 		npte |= PG_W;
992 	if(va < UPT_MIN_ADDRESS)
993 		npte |= PG_u;
994 	else if(va < UPT_MAX_ADDRESS)
995 		npte |= PG_u | PG_RW;
996 #ifdef DEBUG
997 	if (pmapdebug & PDB_ENTER)
998 		printf("enter: new pte value %x ", npte);
999 #endif
1000 	ix = 0;
1001 	do {
1002 		*(int *)pte++ = npte;
1003 		/*TBIS(va);*/
1004 		npte += I386_PAGE_SIZE;
1005 		va += I386_PAGE_SIZE;
1006 	} while (++ix != i386pagesperpage);
1007 	pte--;
1008 #ifdef DEBUGx
1009 cache, tlb flushes
1010 #endif
1011 /*pads(pmap);*/
1012 	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
1013 	tlbflush();
1014 }
1015 
1016 /*
1017  *	pmap_page_protect:
1018  *
1019  *	Lower the permission for all mappings to a given page.
1020  */
1021 void
1022 pmap_page_protect(phys, prot)
1023 	vm_offset_t	phys;
1024 	vm_prot_t	prot;
1025 {
1026 	switch (prot) {
1027 	case VM_PROT_READ:
1028 	case VM_PROT_READ|VM_PROT_EXECUTE:
1029 		pmap_copy_on_write(phys);
1030 		break;
1031 	case VM_PROT_ALL:
1032 		break;
1033 	default:
1034 		pmap_remove_all(phys);
1035 		break;
1036 	}
1037 }
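
/*
 * Usage sketch: the machine-independent VM code hands this a physical
 * page, e.g. VM_PROT_READ to write-protect every existing mapping before
 * cleaning the page, or VM_PROT_NONE to tear all of its mappings down.
 */
#ifdef notdef
static void
example_page_protect(pa)
	vm_offset_t pa;
{
	pmap_page_protect(pa, VM_PROT_READ);	/* copy-on-write everywhere */
	pmap_page_protect(pa, VM_PROT_NONE);	/* now remove every mapping */
}
#endif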
1038 
1039 /*
1040  *	Routine:	pmap_change_wiring
1041  *	Function:	Change the wiring attribute for a map/virtual-address
1042  *			pair.
1043  *	In/out conditions:
1044  *			The mapping must already exist in the pmap.
1045  */
1046 void
1047 pmap_change_wiring(pmap, va, wired)
1048 	register pmap_t	pmap;
1049 	vm_offset_t	va;
1050 	boolean_t	wired;
1051 {
1052 	register pt_entry_t *pte;
1053 	register int ix;
1054 
1055 #ifdef DEBUG
1056 	if (pmapdebug & PDB_FOLLOW)
1057 		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
1058 #endif
1059 	if (pmap == NULL)
1060 		return;
1061 
1062 	pte = pmap_pte(pmap, va);
1063 #ifdef DEBUG
1064 	/*
1065 	 * Page table page is not allocated.
1066 	 * Should this ever happen?  Ignore it for now,
1067 	 * we don't want to force allocation of unnecessary PTE pages.
1068 	 */
1069 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
1070 		if (pmapdebug & PDB_PARANOIA)
1071 			pg("pmap_change_wiring: invalid PDE for %x ", va);
1072 		return;
1073 	}
1074 	/*
1075 	 * Page not valid.  Should this ever happen?
1076 	 * Just continue and change wiring anyway.
1077 	 */
1078 	if (!pmap_pte_v(pte)) {
1079 		if (pmapdebug & PDB_PARANOIA)
1080 			pg("pmap_change_wiring: invalid PTE for %x ", va);
1081 	}
1082 #endif
1083 	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1084 		if (wired)
1085 			pmap->pm_stats.wired_count++;
1086 		else
1087 			pmap->pm_stats.wired_count--;
1088 	}
1089 	/*
1090 	 * Wiring is not a hardware characteristic so there is no need
1091 	 * to invalidate TLB.
1092 	 */
1093 	ix = 0;
1094 	do {
1095 		pmap_pte_set_w(pte++, wired);
1096 	} while (++ix != i386pagesperpage);
1097 }
1098 
1099 /*
1100  *	Routine:	pmap_pte
1101  *	Function:
1102  *		Extract the page table entry associated
1103  *		with the given map/virtual_address pair.
1104  * [ what about induced faults -wfj]
1105  */
1106 
1107 struct pte *pmap_pte(pmap, va)
1108 	register pmap_t	pmap;
1109 	vm_offset_t va;
1110 {
1111 
1112 #ifdef DEBUGx
1113 	if (pmapdebug & PDB_FOLLOW)
1114 		printf("pmap_pte(%x, %x) ->\n", pmap, va);
1115 #endif
1116 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1117 
1118 		/* are we current address space or kernel? */
1119 		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
1120 			|| pmap == kernel_pmap)
1121 			return ((struct pte *) vtopte(va));
1122 
1123 		/* otherwise, we are alternate address space */
1124 		else {
1125 			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
1126 				!= APTDpde.pd_pfnum) {
1127 				APTDpde = pmap->pm_pdir[PTDPTDI];
1128 				tlbflush();
1129 			}
1130 			return((struct pte *) avtopte(va));
1131 		}
1132 	}
1133 	return(0);
1134 }
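
/*
 * Usage sketch (hypothetical helper): callers are expected to check the
 * pde first, exactly as pmap_pte() itself does, and may then inspect the
 * returned pte, e.g. to see whether a virtual address is currently valid.
 */
#ifdef notdef
static boolean_t
example_va_is_valid(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	struct pte *pte = pmap_pte(pmap, va);

	return (pte != 0 && pmap_pte_v(pte));
}
#endif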
1135 
1136 /*
1137  *	Routine:	pmap_extract
1138  *	Function:
1139  *		Extract the physical page address associated
1140  *		with the given map/virtual_address pair.
1141  */
1142 
1143 vm_offset_t
1144 pmap_extract(pmap, va)
1145 	register pmap_t	pmap;
1146 	vm_offset_t va;
1147 {
1148 	register vm_offset_t pa;
1149 
1150 #ifdef DEBUGx
1151 	if (pmapdebug & PDB_FOLLOW)
1152 		pg("pmap_extract(%x, %x) -> ", pmap, va);
1153 #endif
1154 	pa = 0;
1155 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1156 		pa = *(int *) pmap_pte(pmap, va);
1157 	}
1158 	if (pa)
1159 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1160 #ifdef DEBUGx
1161 	if (pmapdebug & PDB_FOLLOW)
1162 		printf("%x\n", pa);
1163 #endif
1164 	return(pa);
1165 }
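
/*
 * Usage sketch: pmap_pinit() above uses this to learn the physical
 * address of a kernel virtual page; a hypothetical driver could do the
 * same for a buffer it intends to hand to a DMA engine.
 */
#ifdef notdef
static vm_offset_t
example_kva_to_pa(va)
	vm_offset_t va;
{
	return (pmap_extract(kernel_pmap, va));
}
#endif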
1166 
1167 /*
1168  *	Copy the range specified by src_addr/len
1169  *	from the source map to the range dst_addr/len
1170  *	in the destination map.
1171  *
1172  *	This routine is only advisory and need not do anything.
1173  */
1174 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1175 	pmap_t		dst_pmap;
1176 	pmap_t		src_pmap;
1177 	vm_offset_t	dst_addr;
1178 	vm_size_t	len;
1179 	vm_offset_t	src_addr;
1180 {
1181 #ifdef DEBUG
1182 	if (pmapdebug & PDB_FOLLOW)
1183 		printf("pmap_copy(%x, %x, %x, %x, %x)",
1184 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1185 #endif
1186 }
1187 
1188 /*
1189  *	Require that all active physical maps contain no
1190  *	incorrect entries NOW.  [This update includes
1191  *	forcing updates of any address map caching.]
1192  *
1193  *	Generally used to ensure that a thread about
1194  *	to run will see a semantically correct world.
1195  */
1196 void pmap_update()
1197 {
1198 #ifdef DEBUG
1199 	if (pmapdebug & PDB_FOLLOW)
1200 		printf("pmap_update()");
1201 #endif
1202 	tlbflush();
1203 }
1204 
1205 /*
1206  *	Routine:	pmap_collect
1207  *	Function:
1208  *		Garbage collects the physical map system for
1209  *		pages which are no longer used.
1210  *		Success need not be guaranteed -- that is, there
1211  *		may well be pages which are not referenced, but
1212  *		others may be collected.
1213  *	Usage:
1214  *		Called by the pageout daemon when pages are scarce.
1215  * [ needs to be written -wfj ]
1216  */
1217 void
1218 pmap_collect(pmap)
1219 	pmap_t		pmap;
1220 {
1221 	register vm_offset_t pa;
1222 	register pv_entry_t pv;
1223 	register int *pte;
1224 	vm_offset_t kpa;
1225 	int s;
1226 
1227 #ifdef DEBUG
1228 	int *pde;
1229 	int opmapdebug;
1230 	printf("pmap_collect(%x) ", pmap);
1231 #endif
1232 	if (pmap != kernel_pmap)
1233 		return;
1234 
1235 }
1236 
1237 /* [ macro again?, should I force kstack into user map here? -wfj ] */
1238 void
1239 pmap_activate(pmap, pcbp)
1240 	register pmap_t pmap;
1241 	struct pcb *pcbp;
1242 {
1243 	int x;
1244 #ifdef DEBUG
1245 	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
1246 		pg("pmap_activate(%x, %x) ", pmap, pcbp);
1247 #endif
1248 	PMAP_ACTIVATE(pmap, pcbp);
1249 /*printf("pde ");
1250 for(x=0x3f6; x < 0x3fA; x++)
1251 	printf("%x ", pmap->pm_pdir[x]);*/
1252 /*pads(pmap);*/
1253 /*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
1254 }
1255 
1256 /*
1257  *	pmap_zero_page zeros the specified (machine independent)
1258  *	page by mapping the page into virtual memory and using
1259  *	bzero to clear its contents, one machine dependent page
1260  *	at a time.
1261  */
1262 void
1263 pmap_zero_page(phys)
1264 	register vm_offset_t	phys;
1265 {
1266 	register int ix;
1267 
1268 #ifdef DEBUG
1269 	if (pmapdebug & PDB_FOLLOW)
1270 		printf("pmap_zero_page(%x)", phys);
1271 #endif
1272 	phys >>= PG_SHIFT;
1273 	ix = 0;
1274 	do {
1275 		clearseg(phys++);
1276 	} while (++ix != i386pagesperpage);
1277 }
1278 
1279 /*
1280  *	pmap_copy_page copies the specified (machine independent)
1281  *	page by mapping the page into virtual memory and using
1282  *	bcopy to copy the page, one machine dependent page at a
1283  *	time.
1284  */
1285 void
1286 pmap_copy_page(src, dst)
1287 	register vm_offset_t	src, dst;
1288 {
1289 	register int ix;
1290 
1291 #ifdef DEBUG
1292 	if (pmapdebug & PDB_FOLLOW)
1293 		printf("pmap_copy_page(%x, %x)", src, dst);
1294 #endif
1295 	src >>= PG_SHIFT;
1296 	dst >>= PG_SHIFT;
1297 	ix = 0;
1298 	do {
1299 		physcopyseg(src++, dst++);
1300 	} while (++ix != i386pagesperpage);
1301 }
1302 
1303 
1304 /*
1305  *	Routine:	pmap_pageable
1306  *	Function:
1307  *		Make the specified pages (by pmap, offset)
1308  *		pageable (or not) as requested.
1309  *
1310  *		A page which is not pageable may not take
1311  *		a fault; therefore, its page table entry
1312  *		must remain valid for the duration.
1313  *
1314  *		This routine is merely advisory; pmap_enter
1315  *		will specify that these pages are to be wired
1316  *		down (or not) as appropriate.
1317  */
1318 void
1319 pmap_pageable(pmap, sva, eva, pageable)
1320 	pmap_t		pmap;
1321 	vm_offset_t	sva, eva;
1322 	boolean_t	pageable;
1323 {
1324 #ifdef DEBUG
1325 	if (pmapdebug & PDB_FOLLOW)
1326 		printf("pmap_pageable(%x, %x, %x, %x)",
1327 		       pmap, sva, eva, pageable);
1328 #endif
1329 	/*
1330 	 * If we are making a PT page pageable then all valid
1331 	 * mappings must be gone from that page.  Hence it should
1332 	 * be all zeros and there is no need to clean it.
1333 	 * Assumptions:
1334 	 *	- we are called with only one page at a time
1335 	 *	- PT pages have only one pv_table entry
1336 	 */
1337 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1338 		register pv_entry_t pv;
1339 		register vm_offset_t pa;
1340 
1341 #ifdef DEBUG
1342 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1343 			printf("pmap_pageable(%x, %x, %x, %x)",
1344 			       pmap, sva, eva, pageable);
1345 #endif
1346 		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
1347 			return;*/
1348 		if(pmap_pte(pmap, sva) == 0)
1349 			return;
1350 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1351 		if (pa < vm_first_phys || pa >= vm_last_phys)
1352 			return;
1353 		pv = pa_to_pvh(pa);
1354 		/*if (!ispt(pv->pv_va))
1355 			return;*/
1356 #ifdef DEBUG
1357 		if (pv->pv_va != sva || pv->pv_next) {
1358 			pg("pmap_pageable: bad PT page va %x next %x\n",
1359 			       pv->pv_va, pv->pv_next);
1360 			return;
1361 		}
1362 #endif
1363 		/*
1364 		 * Mark it unmodified to avoid pageout
1365 		 */
1366 		pmap_clear_modify(pa);
1367 #ifdef needsomethinglikethis
1368 		if (pmapdebug & PDB_PTPAGE)
1369 			pg("pmap_pageable: PT page %x(%x) unmodified\n",
1370 			       sva, *(int *)pmap_pte(pmap, sva));
1371 		if (pmapdebug & PDB_WIRING)
1372 			pmap_check_wiring("pageable", sva);
1373 #endif
1374 	}
1375 }
1376 
1377 /*
1378  *	Clear the modify bits on the specified physical page.
1379  */
1380 
1381 void
1382 pmap_clear_modify(pa)
1383 	vm_offset_t	pa;
1384 {
1385 #ifdef DEBUG
1386 	if (pmapdebug & PDB_FOLLOW)
1387 		printf("pmap_clear_modify(%x)", pa);
1388 #endif
1389 	pmap_changebit(pa, PG_M, FALSE);
1390 }
1391 
1392 /*
1393  *	pmap_clear_reference:
1394  *
1395  *	Clear the reference bit on the specified physical page.
1396  */
1397 
1398 void pmap_clear_reference(pa)
1399 	vm_offset_t	pa;
1400 {
1401 #ifdef DEBUG
1402 	if (pmapdebug & PDB_FOLLOW)
1403 		printf("pmap_clear_reference(%x)", pa);
1404 #endif
1405 	pmap_changebit(pa, PG_U, FALSE);
1406 }
1407 
1408 /*
1409  *	pmap_is_referenced:
1410  *
1411  *	Return whether or not the specified physical page is referenced
1412  *	by any physical maps.
1413  */
1414 
1415 boolean_t
1416 pmap_is_referenced(pa)
1417 	vm_offset_t	pa;
1418 {
1419 #ifdef DEBUG
1420 	if (pmapdebug & PDB_FOLLOW) {
1421 		boolean_t rv = pmap_testbit(pa, PG_U);
1422 		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
1423 		return(rv);
1424 	}
1425 #endif
1426 	return(pmap_testbit(pa, PG_U));
1427 }
1428 
1429 /*
1430  *	pmap_is_modified:
1431  *
1432  *	Return whether or not the specified physical page is modified
1433  *	by any physical maps.
1434  */
1435 
1436 boolean_t
1437 pmap_is_modified(pa)
1438 	vm_offset_t	pa;
1439 {
1440 #ifdef DEBUG
1441 	if (pmapdebug & PDB_FOLLOW) {
1442 		boolean_t rv = pmap_testbit(pa, PG_M);
1443 		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
1444 		return(rv);
1445 	}
1446 #endif
1447 	return(pmap_testbit(pa, PG_M));
1448 }
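
/*
 * Usage sketch (hypothetical pageout-style check): together with
 * pmap_clear_modify() and pmap_clear_reference() above, these queries are
 * how the machine-independent pageout code decides whether a page must be
 * cleaned before it can be reused.
 */
#ifdef notdef
static boolean_t
example_page_needs_clean(pa)
	vm_offset_t pa;
{
	if (pmap_is_modified(pa)) {
		pmap_clear_modify(pa);
		return (TRUE);
	}
	return (FALSE);
}
#endif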
1449 
1450 vm_offset_t
1451 pmap_phys_address(ppn)
1452 	int ppn;
1453 {
1454 	return(i386_ptob(ppn));
1455 }
1456 
1457 /*
1458  * Miscellaneous support routines follow
1459  */
1460 
1461 i386_protection_init()
1462 {
1463 	register int *kp, prot;
1464 
1465 	kp = protection_codes;
1466 	for (prot = 0; prot < 8; prot++) {
1467 		switch (prot) {
1468 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1469 			*kp++ = 0;
1470 			break;
1471 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1472 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1473 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1474 			*kp++ = PG_RO;
1475 			break;
1476 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1477 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1478 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1479 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1480 			*kp++ = PG_RW;
1481 			break;
1482 		}
1483 	}
1484 }
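
/*
 * For reference, the loop above leaves protection_codes[] with (sketch):
 *
 *	no access at all		-> 0
 *	any READ and/or EXECUTE		-> PG_RO
 *	anything including WRITE	-> PG_RW
 *
 * pte_prot() simply indexes this table with the machine-independent
 * protection bits.
 */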
1485 
1486 static
1487 boolean_t
1488 pmap_testbit(pa, bit)
1489 	register vm_offset_t pa;
1490 	int bit;
1491 {
1492 	register pv_entry_t pv;
1493 	register int *pte, ix;
1494 	int s;
1495 
1496 	if (pa < vm_first_phys || pa >= vm_last_phys)
1497 		return(FALSE);
1498 
1499 	pv = pa_to_pvh(pa);
1500 	s = splimp();
1501 	/*
1502 	 * Check saved info first
1503 	 */
1504 	if (pmap_attributes[pa_index(pa)] & bit) {
1505 		splx(s);
1506 		return(TRUE);
1507 	}
1508 	/*
1509 	 * Not found, check current mappings returning
1510 	 * immediately if found.
1511 	 */
1512 	if (pv->pv_pmap != NULL) {
1513 		for (; pv; pv = pv->pv_next) {
1514 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1515 			ix = 0;
1516 			do {
1517 				if (*pte++ & bit) {
1518 					splx(s);
1519 					return(TRUE);
1520 				}
1521 			} while (++ix != i386pagesperpage);
1522 		}
1523 	}
1524 	splx(s);
1525 	return(FALSE);
1526 }
1527 
1528 pmap_changebit(pa, bit, setem)
1529 	register vm_offset_t pa;
1530 	int bit;
1531 	boolean_t setem;
1532 {
1533 	register pv_entry_t pv;
1534 	register int *pte, npte, ix;
1535 	vm_offset_t va;
1536 	int s;
1537 	boolean_t firstpage = TRUE;
1538 
1539 #ifdef DEBUG
1540 	if (pmapdebug & PDB_BITS)
1541 		printf("pmap_changebit(%x, %x, %s)",
1542 		       pa, bit, setem ? "set" : "clear");
1543 #endif
1544 	if (pa < vm_first_phys || pa >= vm_last_phys)
1545 		return;
1546 
1547 	pv = pa_to_pvh(pa);
1548 	s = splimp();
1549 	/*
1550 	 * Clear saved attributes (modify, reference)
1551 	 */
1552 	if (!setem)
1553 		pmap_attributes[pa_index(pa)] &= ~bit;
1554 	/*
1555 	 * Loop over all current mappings setting/clearing as appropriate.
1556 	 * If setting RO do we need to clear the VAC?
1557 	 */
1558 	if (pv->pv_pmap != NULL) {
1559 #ifdef DEBUG
1560 		int toflush = 0;
1561 #endif
1562 		for (; pv; pv = pv->pv_next) {
1563 #ifdef DEBUG
1564 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1565 #endif
1566 			va = pv->pv_va;
1567 
1568 			/*
1569 			 * XXX don't write protect pager mappings
1570 			 */
1571 			if (bit == PG_RO) {
1572 				extern vm_offset_t pager_sva, pager_eva;
1573 
1574 				if (va >= pager_sva && va < pager_eva)
1575 					continue;
1576 			}
1577 
1578 			pte = (int *) pmap_pte(pv->pv_pmap, va);
1579 			ix = 0;
1580 			do {
1581 				if (setem)
1582 					npte = *pte | bit;
1583 				else
1584 					npte = *pte & ~bit;
1585 				if (*pte != npte) {
1586 					*pte = npte;
1587 					/*TBIS(va);*/
1588 				}
1589 				va += I386_PAGE_SIZE;
1590 				pte++;
1591 			} while (++ix != i386pagesperpage);
1592 
1593 			if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
1594 				pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
1595 		}
1596 #ifdef somethinglikethis
1597 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1598 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1599 				DCIA();
1600 			else if (toflush == 2)
1601 				DCIS();
1602 			else
1603 				DCIU();
1604 		}
1605 #endif
1606 	}
1607 	splx(s);
1608 }
1609 
1610 #ifdef DEBUG
1611 pmap_pvdump(pa)
1612 	vm_offset_t pa;
1613 {
1614 	register pv_entry_t pv;
1615 
1616 	printf("pa %x", pa);
1617 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1618 		printf(" -> pmap %x, va %x, flags %x",
1619 		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
1620 		pads(pv->pv_pmap);
1621 	}
1622 	printf(" ");
1623 }
1624 
1625 #ifdef notyet
1626 pmap_check_wiring(str, va)
1627 	char *str;
1628 	vm_offset_t va;
1629 {
1630 	vm_map_entry_t entry;
1631 	register int count, *pte;
1632 
1633 	va = trunc_page(va);
1634 	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
1635 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
1636 		return;
1637 
1638 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
1639 		pg("wired_check: entry for %x not found\n", va);
1640 		return;
1641 	}
1642 	count = 0;
1643 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
1644 		if (*pte)
1645 			count++;
1646 	if (entry->wired_count != count)
1647 		pg("*%s*: %x: w%d/a%d\n",
1648 		       str, va, entry->wired_count, count);
1649 }
1650 #endif
1651 
1652 /* print address space of pmap*/
1653 pads(pm) pmap_t pm; {
1654 	unsigned va, i, j;
1655 	struct pte *ptep;
1656 
1657 	if(pm == kernel_pmap) return;
1658 	for (i = 0; i < 1024; i++)
1659 		if(pm->pm_pdir[i].pd_v)
1660 			for (j = 0; j < 1024 ; j++) {
1661 				va = (i<<22)+(j<<12);
1662 				if (pm == kernel_pmap && va < 0xfe000000)
1663 						continue;
1664 				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
1665 						continue;
1666 				ptep = pmap_pte(pm, va);
1667 				if(pmap_pte_v(ptep))
1668 					printf("%x:%x ", va, *(int *)ptep);
1669 			} ;
1670 
1671 }
1672 #endif
1673