1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department and William Jolitz of UUNET Technologies Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)pmap.c	7.7 (Berkeley)	5/12/91
38  */
39 
40 /*
41  * Derived from the hp300 version by Mike Hibler; this version by William
42  * Jolitz uses a recursive map [a pde points to the page directory] to
43  * map the page tables using the page tables themselves.  This is done to
44  * reduce the impact of large, sparse address spaces on kernel virtual
45  * memory, and to reduce the per-process cost of memory.
46  *
47  *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
48  */
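
/*
 * With the recursive slot in place, the page directory doubles as a page
 * table for the page tables: all of the current map's PTEs appear in one
 * fixed 4MB window of kernel virtual space.  See pmap_pte() below, which
 * walks that window via vtopte()/avtopte().
 */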
49 
50 /*
51  *	Reno i386 version, from Mike Hibler's hp300 version.
52  */
53 
54 /*
55  *	Manages physical address maps.
56  *
57  *	In addition to hardware address maps, this
58  *	module is called upon to provide software-use-only
59  *	maps which may or may not be stored in the same
60  *	form as hardware maps.  These pseudo-maps are
61  *	used to store intermediate results from copy
62  *	operations to and from address spaces.
63  *
64  *	Since the information managed by this module is
65  *	also stored by the logical address mapping module,
66  *	this module may throw away valid virtual-to-physical
67  *	mappings at almost any time.  However, invalidations
68  *	of virtual-to-physical mappings must be done as
69  *	requested.
70  *
71  *	In order to cope with hardware architectures which
72  *	make virtual-to-physical map invalidates expensive,
73  *	this module may delay invalidation or protection-reduction
74  *	operations until such time as they are actually
75  *	necessary.  This module is given full information as
76  *	to which processors are currently using which maps,
77  *	and as to when physical maps must be made correct.
78  */
79 
80 #include "param.h"
81 #include "proc.h"
82 #include "malloc.h"
83 #include "user.h"
84 
85 #include "vm/vm.h"
86 #include "vm/vm_kern.h"
87 #include "vm/vm_page.h"
88 /*#include "vm/vm_pageout.h"*/
89 
90 /*#include "machine/isa.h"*/
91 
92 /*
93  * Allocate various and sundry SYSMAPs used in the days of old VM
94  * and not yet converted.  XXX.
95  */
96 #define BSDVM_COMPAT	1
97 
98 #ifdef DEBUG
99 struct {
100 	int kernel;	/* entering kernel mapping */
101 	int user;	/* entering user mapping */
102 	int ptpneeded;	/* needed to allocate a PT page */
103 	int pwchange;	/* no mapping change, just wiring or protection */
104 	int wchange;	/* no mapping change, just wiring */
105 	int mchange;	/* was mapped but mapping to different page */
106 	int managed;	/* a managed page */
107 	int firstpv;	/* first mapping for this PA */
108 	int secondpv;	/* second mapping for this PA */
109 	int ci;		/* cache inhibited */
110 	int unmanaged;	/* not a managed page */
111 	int flushes;	/* cache flushes */
112 } enter_stats;
113 struct {
114 	int calls;
115 	int removes;
116 	int pvfirst;
117 	int pvsearch;
118 	int ptinvalid;
119 	int uflushes;
120 	int sflushes;
121 } remove_stats;
122 
123 int debugmap = 0;
124 int pmapdebug = 0;
125 #define PDB_FOLLOW	0x0001
126 #define PDB_INIT	0x0002
127 #define PDB_ENTER	0x0004
128 #define PDB_REMOVE	0x0008
129 #define PDB_CREATE	0x0010
130 #define PDB_PTPAGE	0x0020
131 #define PDB_CACHE	0x0040
132 #define PDB_BITS	0x0080
133 #define PDB_COLLECT	0x0100
134 #define PDB_PROTECT	0x0200
135 #define PDB_PDRTAB	0x0400
136 #define PDB_PARANOIA	0x2000
137 #define PDB_WIRING	0x4000
138 #define PDB_PVDUMP	0x8000
139 
140 int pmapvacflush = 0;
141 #define	PVF_ENTER	0x01
142 #define	PVF_REMOVE	0x02
143 #define	PVF_PROTECT	0x04
144 #define	PVF_TOTAL	0x80
145 #endif
146 
147 /*
148  * Get PDEs and PTEs for user/kernel address space
149  */
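/*
 * pmap_pde() picks a page directory entry using the top 10 bits of the
 * VA (PD_SHIFT is presumably 22 here, and the & 1023 masks down to a
 * 10-bit index); the pmap_pde_*() and pmap_pte_*() macros below simply
 * pick apart the hardware PDE/PTE bit fields.
 */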
150 #define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
151 
152 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
153 
154 #define pmap_pde_v(pte)		((pte)->pd_v)
155 #define pmap_pte_w(pte)		((pte)->pg_w)
156 /* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
157 #define pmap_pte_m(pte)		((pte)->pg_m)
158 #define pmap_pte_u(pte)		((pte)->pg_u)
159 #define pmap_pte_v(pte)		((pte)->pg_v)
160 #define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
161 #define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
162 
163 /*
164  * Given a map and a machine independent protection code,
165  * convert to an i386 protection code.
166  */
167 #define pte_prot(m, p)	(protection_codes[p])
168 int	protection_codes[8];
169 
170 struct pmap	kernel_pmap_store;
171 pmap_t		kernel_pmap;
172 
173 vm_offset_t    	avail_start;	/* PA of first available physical page */
174 vm_offset_t	avail_end;	/* PA of last available physical page */
175 vm_size_t	mem_size;	/* memory size in bytes */
176 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
177 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
178 vm_offset_t	vm_first_phys;	/* PA of first managed page */
179 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
180 int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
181 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
182 char		*pmap_attributes;	/* reference and modify bits */
183 
184 boolean_t	pmap_testbit();
185 void		pmap_clear_modify();
186 
187 #if BSDVM_COMPAT
188 #include "msgbuf.h"
189 
190 /*
191  * All those kernel PT submaps that BSD is so fond of
192  */
193 struct pte	*CMAP1, *CMAP2, *mmap;
194 caddr_t		CADDR1, CADDR2, vmmap;
195 struct pte	*msgbufmap;
196 struct msgbuf	*msgbufp;
197 #endif
198 
199 void pmap_activate __P((pmap_t, struct pcb *));
200 
201 /*
202  *	Bootstrap the system enough to run with virtual memory.
203  *	Map the kernel's code and data, and allocate the system page table.
204  *
205  *	On the I386 this is called after mapping has already been enabled
206  *	and just syncs the pmap module with what has already been done.
207  *	[We can't call it easily with mapping off since the kernel is not
208  *	mapped with PA == VA, hence we would have to relocate every address
209  *	from the linked base (virtual) address 0xFE000000 to the actual
210  *	(physical) address starting relative to 0]
211  */
212 struct pte *pmap_pte();
213 
214 extern vm_offset_t	atdevbase;
215 void
216 pmap_bootstrap(firstaddr, loadaddr)
217 	vm_offset_t firstaddr;
218 	vm_offset_t loadaddr;
219 {
220 #if BSDVM_COMPAT
221 	vm_offset_t va;
222 	struct pte *pte;
223 #endif
224 	extern vm_offset_t maxmem, physmem;
225 	extern int IdlePTD;
226 
227 #if	defined(ODYSSEUS) || defined(ARGO) || defined(CIRCE)
228 	firstaddr = 0x100000;	/* for some reason, basemem screws up on this machine */
229 #endif
230 	printf("ps %x pe %x ", firstaddr, maxmem << PG_SHIFT);
231 	avail_start = firstaddr;
232 	avail_end = maxmem << PG_SHIFT;
233 
234 	/* XXX: allow for msgbuf */
235 	avail_end -= i386_round_page(sizeof(struct msgbuf));
236 
237 	mem_size = physmem << PG_SHIFT;
238 	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
239 	virtual_end = VM_MAX_KERNEL_ADDRESS;
240 	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
241 
242 	/*
243 	 * Initialize protection array.
244 	 */
245 	i386_protection_init();
246 
247 	/*
248 	 * The kernel's pmap is statically allocated so we don't
249 	 * have to use pmap_create, which is unlikely to work
250 	 * correctly at this part of the boot sequence.
251 	 */
252 	kernel_pmap = &kernel_pmap_store;
253 
254 #ifdef notdef
255 	/*
256 	 * Create Kernel page directory table and page maps.
257 	 * [ currently done in locore. i have wild and crazy ideas -wfj ]
258 	 */
259 	bzero(firstaddr, 4*NBPG);
260 	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
261 	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
262 
263 	firstaddr += NBPG;
264 	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
265 		x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
266 			struct pde *pde;
267 		pde = kernel_pmap->pm_pdir + x;
268 		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
269 	}
270 #else
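	/*
	 * IdlePTD appears to hold the physical (load-relative) address of
	 * the page directory built in locore; adding the 0xfe000000 kernel
	 * base turns it into the directory's kernel virtual address.
	 */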
271 	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
272 #endif
273 
274 
275 	simple_lock_init(&kernel_pmap->pm_lock);
276 	kernel_pmap->pm_count = 1;
277 
278 #if BSDVM_COMPAT
279 	/*
280 	 * Allocate all the submaps we need
281 	 */
282 #define	SYSMAP(c, p, v, n)	\
283 	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
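	/*
	 * Each SYSMAP() use carves (n) pages of kernel VA starting at va,
	 * recording the starting address in v (cast to type c) and the
	 * first of the reserved PTEs in p, then advances va and pte.
	 */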
284 
285 	va = virtual_avail;
286 	pte = pmap_pte(kernel_pmap, va);
287 
288 	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
289 	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
290 	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
291 	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
292 	virtual_avail = va;
293 #endif
294 
295 	/**(int *)PTD = 0;
296 	load_cr3(rcr3());*/
297 
298 }
299 
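/*
 * A physical address is "valid" for bootstrap purposes if it lies outside
 * the 640K-1MB ISA hole (video memory, adapter ROM and BIOS), i.e. below
 * 0xa0000 or at or above 0x100000.
 */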
300 pmap_isvalidphys(addr) {
301 	if (addr < 0xa0000) return (1);
302 	if (addr >= 0x100000) return (1);
303 	return(0);
304 }
305 
306 /*
307  * Bootstrap memory allocator. This function allows for early dynamic
308  * memory allocation until the virtual memory system has been bootstrapped.
309  * After that point, either kmem_alloc or malloc should be used. This
310  * function works by stealing pages from the (to be) managed page pool,
311  * stealing virtual address space, then mapping the pages and zeroing them.
312  *
313  * It should be used from pmap_bootstrap till vm_page_startup, afterwards
314  * it cannot be used, and will generate a panic if tried. Note that this
315  * memory will never be freed, and in essence it is wired down.
316  */
317 void *
318 pmap_bootstrap_alloc(size) {
319 	vm_offset_t val;
320 	int i;
321 	extern boolean_t vm_page_startup_initialized;
322 
323 	if (vm_page_startup_initialized)
324 		panic("pmap_bootstrap_alloc: called after startup initialized");
325 	size = round_page(size);
326 	val = virtual_avail;
327 
328 	/* deal with "hole incursion" */
329 	for (i = 0; i < size; i += PAGE_SIZE) {
330 
331 		while (!pmap_isvalidphys(avail_start))
332 				avail_start += PAGE_SIZE;
333 
334 		virtual_avail = pmap_map(virtual_avail, avail_start,
335 			avail_start + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
336 		avail_start += PAGE_SIZE;
337 	}
338 
339 	blkclr ((caddr_t) val, size);
340 	return ((void *) val);
341 }
342 
343 /*
344  *	Initialize the pmap module.
345  *	Called by vm_init, to initialize any structures that the pmap
346  *	system needs to map virtual memory.
347  */
348 void
349 pmap_init(phys_start, phys_end)
350 	vm_offset_t	phys_start, phys_end;
351 {
352 	vm_offset_t	addr, addr2;
353 	vm_size_t	npg, s;
354 	int		rv;
355 	extern int KPTphys;
356 
357 #ifdef DEBUG
358 	if (pmapdebug & PDB_FOLLOW)
359 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
360 #endif
361 	/*
362 	 * Now that kernel map has been allocated, we can mark as
363 	 * unavailable regions which we have mapped in locore.
364 	 */
365 	addr = atdevbase;
366 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
367 			   &addr, (0x100000-0xa0000), FALSE);
368 
369 	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
370 	vm_object_reference(kernel_object);
371 	(void) vm_map_find(kernel_map, kernel_object, addr,
372 			   &addr, 2*NBPG, FALSE);
373 
374 	/*
375 	 * Allocate memory for random pmap data structures.  Includes the
376 	 * pv_head_table and pmap_attributes.
377 	 */
378 	npg = atop(phys_end - phys_start);
379 	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
380 	s = round_page(s);
381 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
382 	pv_table = (pv_entry_t) addr;
383 	addr += sizeof(struct pv_entry) * npg;
384 	pmap_attributes = (char *) addr;
385 #ifdef DEBUG
386 	if (pmapdebug & PDB_INIT)
387 		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
388 		       s, npg, pv_table, pmap_attributes);
389 #endif
390 
391 	/*
392 	 * Now it is safe to enable pv_table recording.
393 	 */
394 	vm_first_phys = phys_start;
395 	vm_last_phys = phys_end;
396 	pmap_initialized = TRUE;
397 }
398 
399 /*
400  *	Used to map a range of physical addresses into kernel
401  *	virtual address space.
402  *
403  *	For now, VM is already on, we only need to map the
404  *	specified memory.
405  */
406 vm_offset_t
407 pmap_map(virt, start, end, prot)
408 	vm_offset_t	virt;
409 	vm_offset_t	start;
410 	vm_offset_t	end;
411 	int		prot;
412 {
413 #ifdef DEBUG
414 	if (pmapdebug & PDB_FOLLOW)
415 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
416 #endif
417 	while (start < end) {
418 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
419 		virt += PAGE_SIZE;
420 		start += PAGE_SIZE;
421 	}
422 	return(virt);
423 }
424 
425 /*
426  *	Create and return a physical map.
427  *
428  *	If the size specified for the map
429  *	is zero, the map is an actual physical
430  *	map, and may be referenced by the
431  *	hardware.
432  *
433  *	If the size specified is non-zero,
434  *	the map will be used in software only, and
435  *	is bounded by that size.
436  *
437  * [ just allocate a ptd and mark it uninitialized -- should we track
438  *   with a table which process has which ptd? -wfj ]
439  */
440 
441 pmap_t
442 pmap_create(size)
443 	vm_size_t	size;
444 {
445 	register pmap_t pmap;
446 
447 #ifdef DEBUG
448 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
449 		printf("pmap_create(%x)\n", size);
450 #endif
451 	/*
452 	 * Software use map does not need a pmap
453 	 */
454 	if (size)
455 		return(NULL);
456 
457 	/* XXX: is it ok to wait here? */
458 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
459 #ifdef notifwewait
460 	if (pmap == NULL)
461 		panic("pmap_create: cannot allocate a pmap");
462 #endif
463 	bzero(pmap, sizeof(*pmap));
464 	pmap_pinit(pmap);
465 	return (pmap);
466 }
467 
468 /*
469  * Initialize a preallocated and zeroed pmap structure,
470  * such as one in a vmspace structure.
471  */
472 void
473 pmap_pinit(pmap)
474 	register struct pmap *pmap;
475 {
476 
477 #ifdef DEBUG
478 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
479 		pg("pmap_pinit(%x)\n", pmap);
480 #endif
481 
482 	/*
483 	 * No need to allocate page table space yet but we do need a
484 	 * valid page directory table.
485 	 */
486 	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
487 
488 	/* wire in kernel global address entries */
489 	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
490 		(KPTDI_LAST-KPTDI_FIRST+1)*4);
491 
492 	/* install self-referential address mapping entry */
493 	*(int *)(pmap->pm_pdir+PTDPTDI) =
494 		(int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_URKW;
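	/*
	 * The PTDPTDI slot points the new directory back at itself, so once
	 * this pmap is loaded its page tables (and the directory) become
	 * visible through the recursively mapped PTE window that pmap_pte()
	 * relies on.
	 */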
495 
496 	pmap->pm_count = 1;
497 	simple_lock_init(&pmap->pm_lock);
498 }
499 
500 /*
501  *	Retire the given physical map from service.
502  *	Should only be called if the map contains
503  *	no valid mappings.
504  */
505 void
506 pmap_destroy(pmap)
507 	register pmap_t pmap;
508 {
509 	int count;
510 
511 #ifdef DEBUG
512 	if (pmapdebug & PDB_FOLLOW)
513 		printf("pmap_destroy(%x)\n", pmap);
514 #endif
515 	if (pmap == NULL)
516 		return;
517 
518 	simple_lock(&pmap->pm_lock);
519 	count = --pmap->pm_count;
520 	simple_unlock(&pmap->pm_lock);
521 	if (count == 0) {
522 		pmap_release(pmap);
523 		free((caddr_t)pmap, M_VMPMAP);
524 	}
525 }
526 
527 /*
528  * Release any resources held by the given physical map.
529  * Called when a pmap initialized by pmap_pinit is being released.
530  * Should only be called if the map contains no valid mappings.
531  */
532 void
533 pmap_release(pmap)
534 	register struct pmap *pmap;
535 {
536 
537 #ifdef DEBUG
538 	if (pmapdebug & PDB_FOLLOW)
539 		pg("pmap_release(%x)\n", pmap);
540 #endif
541 #ifdef notdef /* DIAGNOSTIC */
542 	/* count would be 0 from pmap_destroy... */
543 	simple_lock(&pmap->pm_lock);
544 	if (pmap->pm_count != 1)
545 		panic("pmap_release count");
546 #endif
547 	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
548 }
549 
550 /*
551  *	Add a reference to the specified pmap.
552  */
553 void
554 pmap_reference(pmap)
555 	pmap_t	pmap;
556 {
557 #ifdef DEBUG
558 	if (pmapdebug & PDB_FOLLOW)
559 		pg("pmap_reference(%x)", pmap);
560 #endif
561 	if (pmap != NULL) {
562 		simple_lock(&pmap->pm_lock);
563 		pmap->pm_count++;
564 		simple_unlock(&pmap->pm_lock);
565 	}
566 }
567 
568 /*
569  *	Remove the given range of addresses from the specified map.
570  *
571  *	It is assumed that the start and end are properly
572  *	rounded to the page size.
573  */
574 void
575 pmap_remove(pmap, sva, eva)
576 	register struct pmap *pmap;
577 	vm_offset_t sva, eva;
578 {
579 	register vm_offset_t pa, va;
580 	register pt_entry_t *pte;
581 	register pv_entry_t pv, npv;
582 	register int ix;
583 	pmap_t ptpmap;
584 	int *pde, s, bits;
585 	boolean_t firstpage = TRUE;
586 	boolean_t flushcache = FALSE;
587 #ifdef DEBUG
588 	pt_entry_t opte;
589 
590 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
591 		printf("pmap_remove(%x, %x, %x)", pmap, sva, eva);
592 	if (eva >= USRSTACK && eva <= UPT_MAX_ADDRESS)
593 		nullop();
594 #endif
595 
596 	if (pmap == NULL)
597 		return;
598 
599 #ifdef DEBUG
600 	remove_stats.calls++;
601 #endif
602 	for (va = sva; va < eva; va += PAGE_SIZE) {
603 		/*
604 		 * Weed out invalid mappings.
605 		 * Note: we assume that the page directory table is
606 	 	 * always allocated, and in kernel virtual.
607 		 */
608 		if (!pmap_pde_v(pmap_pde(pmap, va)))
609 			continue;
610 
611 		pte = pmap_pte(pmap, va);
612 		if (pte == 0)
613 			continue;
614 		pa = pmap_pte_pa(pte);
615 		if (pa == 0)
616 			continue;
617 #ifdef DEBUG
618 		opte = *pte;
619 		remove_stats.removes++;
620 #endif
621 		/*
622 		 * Update statistics
623 		 */
624 		if (pmap_pte_w(pte))
625 			pmap->pm_stats.wired_count--;
626 		pmap->pm_stats.resident_count--;
627 
628 		/*
629 		 * Invalidate the PTEs.
630 		 * XXX: should cluster them up and invalidate as many
631 		 * as possible at once.
632 		 */
633 #ifdef DEBUG
634 		if (pmapdebug & PDB_REMOVE)
635 			printf("remove: inv %x ptes at %x(%x) ",
636 			       i386pagesperpage, pte, *(int *)pte);
637 #endif
638 		bits = ix = 0;
639 		do {
640 			bits |= *(int *)pte & (PG_U|PG_M);
641 			*(int *)pte++ = 0;
642 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
643 		} while (++ix != i386pagesperpage);
644 		if (pmap == &curproc->p_vmspace->vm_pmap)
645 			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
646 		/* are we current address space or kernel? */
647 		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
648 			|| pmap == kernel_pmap)
649 		load_cr3(curpcb->pcb_ptd);*/
650 		tlbflush();
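		/*
		 * tlbflush() presumably reloads %cr3 and so discards the
		 * entire TLB; the commented-out test above would have
		 * limited the reload to the current or kernel address space.
		 */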
651 
652 #ifdef needednotdone
653 reduce wiring count on page table pages as references drop
654 #endif
655 
656 		/*
657 		 * Remove from the PV table (raise IPL since we
658 		 * may be called at interrupt time).
659 		 */
660 		if (pa < vm_first_phys || pa >= vm_last_phys)
661 			continue;
662 		pv = pa_to_pvh(pa);
663 		s = splimp();
664 		/*
665 		 * If it is the first entry on the list, it is actually
666 		 * in the header and we must copy the following entry up
667 		 * to the header.  Otherwise we must search the list for
668 		 * the entry.  In either case we free the now unused entry.
669 		 */
670 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
671 			npv = pv->pv_next;
672 			if (npv) {
673 				*pv = *npv;
674 				free((caddr_t)npv, M_VMPVENT);
675 			} else
676 				pv->pv_pmap = NULL;
677 #ifdef DEBUG
678 			remove_stats.pvfirst++;
679 #endif
680 		} else {
681 			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
682 #ifdef DEBUG
683 				remove_stats.pvsearch++;
684 #endif
685 				if (pmap == npv->pv_pmap && va == npv->pv_va)
686 					break;
687 				pv = npv;
688 			}
689 #ifdef DEBUG
690 			if (npv == NULL)
691 				panic("pmap_remove: PA not in pv_tab");
692 #endif
693 			pv->pv_next = npv->pv_next;
694 			free((caddr_t)npv, M_VMPVENT);
695 			pv = pa_to_pvh(pa);
696 		}
697 
698 #ifdef notdef
699 [tally number of pagetable pages, if sharing of ptpages adjust here]
700 #endif
701 		/*
702 		 * Update saved attributes for managed page
703 		 */
704 		pmap_attributes[pa_index(pa)] |= bits;
705 		splx(s);
706 	}
707 #ifdef notdef
708 [cache and tlb flushing, if needed]
709 #endif
710 }
711 
712 /*
713  *	Routine:	pmap_remove_all
714  *	Function:
715  *		Removes this physical page from
716  *		all physical maps in which it resides.
717  *		Reflects back modify bits to the pager.
718  */
719 void
720 pmap_remove_all(pa)
721 	vm_offset_t pa;
722 {
723 	register pv_entry_t pv;
724 	int s;
725 
726 #ifdef DEBUG
727 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
728 		printf("pmap_remove_all(%x)", pa);
729 	/*pmap_pvdump(pa);*/
730 #endif
731 	/*
732 	 * Not one of ours
733 	 */
734 	if (pa < vm_first_phys || pa >= vm_last_phys)
735 		return;
736 
737 	pv = pa_to_pvh(pa);
738 	s = splimp();
739 	/*
740 	 * Do it the easy way for now
741 	 */
742 	while (pv->pv_pmap != NULL) {
743 #ifdef DEBUG
744 		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
745 		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
746 			panic("pmap_remove_all: bad mapping");
747 #endif
748 		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
749 	}
750 	splx(s);
751 }
752 
753 /*
754  *	Routine:	pmap_copy_on_write
755  *	Function:
756  *		Remove write privileges from all
757  *		physical maps for this physical page.
758  */
759 void
760 pmap_copy_on_write(pa)
761 	vm_offset_t pa;
762 {
763 #ifdef DEBUG
764 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
765 		printf("pmap_copy_on_write(%x)", pa);
766 #endif
767 	pmap_changebit(pa, PG_RO, TRUE);
768 }
769 
770 /*
771  *	Set the physical protection on the
772  *	specified range of this map as requested.
773  */
774 void
775 pmap_protect(pmap, sva, eva, prot)
776 	register pmap_t	pmap;
777 	vm_offset_t	sva, eva;
778 	vm_prot_t	prot;
779 {
780 	register pt_entry_t *pte;
781 	register vm_offset_t va;
782 	register int ix;
783 	int i386prot;
784 	boolean_t firstpage = TRUE;
785 
786 #ifdef DEBUG
787 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
788 		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
789 #endif
790 	if (pmap == NULL)
791 		return;
792 
793 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
794 		pmap_remove(pmap, sva, eva);
795 		return;
796 	}
797 	if (prot & VM_PROT_WRITE)
798 		return;
799 
800 	for (va = sva; va < eva; va += PAGE_SIZE) {
801 		/*
802 		 * Page table page is not allocated.
803 		 * Skip it, we don't want to force allocation
804 		 * of unnecessary PTE pages just to set the protection.
805 		 */
806 		if (!pmap_pde_v(pmap_pde(pmap, va))) {
807 			/* XXX: avoid address wrap around */
808 			if (va >= i386_trunc_pdr((vm_offset_t)-1))
809 				break;
810 			va = i386_round_pdr(va + PAGE_SIZE);
811 			continue;
812 		} else	pte = pmap_pte(pmap, va);
813 
814 		/*
815 		 * Page not valid.  Again, skip it.
816 		 * Should we do this?  Or set protection anyway?
817 		 */
818 		if (!pmap_pte_v(pte))
819 			continue;
820 
821 		ix = 0;
822 		i386prot = pte_prot(pmap, prot);
823 		if(va < UPT_MAX_ADDRESS)
824 			i386prot |= 2 /*PG_u*/;
825 		do {
826 			/* clear VAC here if PG_RO? */
827 			pmap_pte_set_prot(pte++, i386prot);
828 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
829 		} while (++ix != i386pagesperpage);
830 	}
831 out:
832 	if (pmap == &curproc->p_vmspace->vm_pmap)
833 		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
834 }
835 
836 /*
837  *	Insert the given physical page (p) at
838  *	the specified virtual address (v) in the
839  *	target physical map with the protection requested.
840  *
841  *	If specified, the page will be wired down, meaning
842  *	that the related pte can not be reclaimed.
843  *
844  *	NB:  This is the only routine which MAY NOT lazy-evaluate
845  *	or lose information.  That is, this routine must actually
846  *	insert this page into the given map NOW.
847  */
848 void
849 pmap_enter(pmap, va, pa, prot, wired)
850 	register pmap_t pmap;
851 	vm_offset_t va;
852 	register vm_offset_t pa;
853 	vm_prot_t prot;
854 	boolean_t wired;
855 {
856 	register pt_entry_t *pte;
857 	register int npte, ix;
858 	vm_offset_t opa;
859 	boolean_t cacheable = TRUE;
860 	boolean_t checkpv = TRUE;
861 
862 #ifdef DEBUG
863 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
864 		printf("pmap_enter(%x, %x, %x, %x, %x)",
865 		       pmap, va, pa, prot, wired);
866 	if(!pmap_isvalidphys(pa)) panic("invalid phys");
867 #endif
868 	if (pmap == NULL)
869 		return;
870 
871 	if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig");
872 	/* also, should not muck with PTD va! */
873 
874 #ifdef DEBUG
875 	if (pmap == kernel_pmap)
876 		enter_stats.kernel++;
877 	else
878 		enter_stats.user++;
879 #endif
880 
881 	/*
882 	 * Page Directory table entry not valid, we need a new PT page
883 	 */
884 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
885 		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
886 	}
887 
888 	pte = pmap_pte(pmap, va);
889 	opa = pmap_pte_pa(pte);
890 #ifdef DEBUG
891 	if (pmapdebug & PDB_ENTER)
892 		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
893 #endif
894 
895 	/*
896 	 * Mapping has not changed, must be protection or wiring change.
897 	 */
898 	if (opa == pa) {
899 #ifdef DEBUG
900 		enter_stats.pwchange++;
901 #endif
902 		/*
903 		 * Wiring change, just update stats.
904 		 * We don't worry about wiring PT pages as they remain
905 		 * resident as long as there are valid mappings in them.
906 		 * Hence, if a user page is wired, the PT page will be also.
907 		 */
908 		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
909 #ifdef DEBUG
910 			if (pmapdebug & PDB_ENTER)
911 				pg("enter: wiring change -> %x ", wired);
912 #endif
913 			if (wired)
914 				pmap->pm_stats.wired_count++;
915 			else
916 				pmap->pm_stats.wired_count--;
917 #ifdef DEBUG
918 			enter_stats.wchange++;
919 #endif
920 		}
921 		goto validate;
922 	}
923 
924 	/*
925 	 * Mapping has changed, invalidate old range and fall through to
926 	 * handle validating new mapping.
927 	 */
928 	if (opa) {
929 #ifdef DEBUG
930 		if (pmapdebug & PDB_ENTER)
931 			printf("enter: removing old mapping %x pa %x ", va, opa);
932 #endif
933 		pmap_remove(pmap, va, va + PAGE_SIZE);
934 #ifdef DEBUG
935 		enter_stats.mchange++;
936 #endif
937 	}
938 
939 	/*
940 	 * Enter on the PV list if part of our managed memory
941 	 * Note that we raise IPL while manipulating pv_table
942 	 * since pmap_enter can be called at interrupt time.
943 	 */
944 	if (pa >= vm_first_phys && pa < vm_last_phys) {
945 		register pv_entry_t pv, npv;
946 		int s;
947 
948 #ifdef DEBUG
949 		enter_stats.managed++;
950 #endif
951 		pv = pa_to_pvh(pa);
952 		s = splimp();
953 #ifdef DEBUG
954 		if (pmapdebug & PDB_ENTER)
955 			printf("enter: pv at %x: %x/%x/%x ",
956 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
957 #endif
958 		/*
959 		 * No entries yet, use header as the first entry
960 		 */
961 		if (pv->pv_pmap == NULL) {
962 #ifdef DEBUG
963 			enter_stats.firstpv++;
964 #endif
965 			pv->pv_va = va;
966 			pv->pv_pmap = pmap;
967 			pv->pv_next = NULL;
968 			pv->pv_flags = 0;
969 		}
970 		/*
971 		 * There is at least one other VA mapping this page.
972 		 * Place this entry after the header.
973 		 */
974 		else {
975 			/*printf("second time: ");*/
976 #ifdef DEBUG
977 			for (npv = pv; npv; npv = npv->pv_next)
978 				if (pmap == npv->pv_pmap && va == npv->pv_va)
979 					panic("pmap_enter: already in pv_tab");
980 #endif
981 			npv = (pv_entry_t)
982 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
983 			npv->pv_va = va;
984 			npv->pv_pmap = pmap;
985 			npv->pv_next = pv->pv_next;
986 			pv->pv_next = npv;
987 #ifdef DEBUG
988 			if (!npv->pv_next)
989 				enter_stats.secondpv++;
990 #endif
991 		}
992 		splx(s);
993 	}
994 	/*
995 	 * Assumption: if it is not part of our managed memory
996 	 * then it must be device memory which may be volatile.
997 	 */
998 	if (pmap_initialized) {
999 		checkpv = cacheable = FALSE;
1000 #ifdef DEBUG
1001 		enter_stats.unmanaged++;
1002 #endif
1003 	}
1004 
1005 	/*
1006 	 * Increment counters
1007 	 */
1008 	pmap->pm_stats.resident_count++;
1009 	if (wired)
1010 		pmap->pm_stats.wired_count++;
1011 
1012 validate:
1013 	/*
1014 	 * Now validate mapping with desired protection/wiring.
1015 	 * Assume uniform modified and referenced status for all
1016 	 * I386 pages in a MACH page.
1017 	 */
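	/*
	 * i386pagesperpage allows the VM page size to be a multiple of the
	 * 4K hardware page; the do/while below writes one hardware PTE per
	 * 4K piece, advancing the frame address each time.
	 */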
1018 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1019 	npte |= (*(int *)pte & (PG_M|PG_U));
1020 	if (wired)
1021 		npte |= PG_W;
1022 	if(va < UPT_MIN_ADDRESS)
1023 		npte |= PG_u;
1024 	else if(va < UPT_MAX_ADDRESS)
1025 		npte |= PG_u | PG_RW;
1026 #ifdef DEBUG
1027 	if (pmapdebug & PDB_ENTER)
1028 		printf("enter: new pte value %x ", npte);
1029 #endif
1030 	ix = 0;
1031 	do {
1032 		*(int *)pte++ = npte;
1033 		/*TBIS(va);*/
1034 		npte += I386_PAGE_SIZE;
1035 		va += I386_PAGE_SIZE;
1036 	} while (++ix != i386pagesperpage);
1037 	pte--;
1038 #ifdef DEBUGx
1039 cache, tlb flushes
1040 #endif
1041 /*pads(pmap);*/
1042 	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
1043 	tlbflush();
1044 }
1045 
1046 /*
1047  *	pmap_page_protect:
1048  *
1049  *	Lower the permission for all mappings to a given page.
1050  */
1051 void
1052 pmap_page_protect(phys, prot)
1053 	vm_offset_t	phys;
1054 	vm_prot_t	prot;
1055 {
1056 	switch (prot) {
1057 	case VM_PROT_READ:
1058 	case VM_PROT_READ|VM_PROT_EXECUTE:
1059 		pmap_copy_on_write(phys);
1060 		break;
1061 	case VM_PROT_ALL:
1062 		break;
1063 	default:
1064 		pmap_remove_all(phys);
1065 		break;
1066 	}
1067 }
1068 
1069 /*
1070  *	Routine:	pmap_change_wiring
1071  *	Function:	Change the wiring attribute for a map/virtual-address
1072  *			pair.
1073  *	In/out conditions:
1074  *			The mapping must already exist in the pmap.
1075  */
1076 void
1077 pmap_change_wiring(pmap, va, wired)
1078 	register pmap_t	pmap;
1079 	vm_offset_t	va;
1080 	boolean_t	wired;
1081 {
1082 	register pt_entry_t *pte;
1083 	register int ix;
1084 
1085 #ifdef DEBUG
1086 	if (pmapdebug & PDB_FOLLOW)
1087 		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
1088 #endif
1089 	if (pmap == NULL)
1090 		return;
1091 
1092 	pte = pmap_pte(pmap, va);
1093 #ifdef DEBUG
1094 	/*
1095 	 * Page table page is not allocated.
1096 	 * Should this ever happen?  Ignore it for now,
1097 	 * we don't want to force allocation of unnecessary PTE pages.
1098 	 */
1099 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
1100 		if (pmapdebug & PDB_PARANOIA)
1101 			pg("pmap_change_wiring: invalid PDE for %x ", va);
1102 		return;
1103 	}
1104 	/*
1105 	 * Page not valid.  Should this ever happen?
1106 	 * Just continue and change wiring anyway.
1107 	 */
1108 	if (!pmap_pte_v(pte)) {
1109 		if (pmapdebug & PDB_PARANOIA)
1110 			pg("pmap_change_wiring: invalid PTE for %x ", va);
1111 	}
1112 #endif
1113 	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1114 		if (wired)
1115 			pmap->pm_stats.wired_count++;
1116 		else
1117 			pmap->pm_stats.wired_count--;
1118 	}
1119 	/*
1120 	 * Wiring is not a hardware characteristic so there is no need
1121 	 * to invalidate TLB.
1122 	 */
1123 	ix = 0;
1124 	do {
1125 		pmap_pte_set_w(pte++, wired);
1126 	} while (++ix != i386pagesperpage);
1127 }
1128 
1129 /*
1130  *	Routine:	pmap_pte
1131  *	Function:
1132  *		Extract the page table entry associated
1133  *		with the given map/virtual_address pair.
1134  * [ what about induced faults -wfj]
1135  */
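/*
 * For the current address space (and the kernel) the recursive PTD slot
 * already maps the page tables, so the PTE lives at vtopte(va).  For any
 * other pmap, its directory is temporarily plugged into the alternate
 * recursive slot (APTDpde) and read through avtopte(va); the tlbflush()
 * discards stale alternate-map translations.
 */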
1136 
1137 struct pte *pmap_pte(pmap, va)
1138 	register pmap_t	pmap;
1139 	vm_offset_t va;
1140 {
1141 
1142 #ifdef DEBUGx
1143 	if (pmapdebug & PDB_FOLLOW)
1144 		printf("pmap_pte(%x, %x) ->\n", pmap, va);
1145 #endif
1146 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1147 
1148 		/* are we current address space or kernel? */
1149 		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
1150 			|| pmap == kernel_pmap)
1151 			return ((struct pte *) vtopte(va));
1152 
1153 		/* otherwise, we are alternate address space */
1154 		else {
1155 			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
1156 				!= APTDpde.pd_pfnum) {
1157 				APTDpde = pmap->pm_pdir[PTDPTDI];
1158 				tlbflush();
1159 			}
1160 			return((struct pte *) avtopte(va));
1161 		}
1162 	}
1163 	return(0);
1164 }
1165 
1166 /*
1167  *	Routine:	pmap_extract
1168  *	Function:
1169  *		Extract the physical page address associated
1170  *		with the given map/virtual_address pair.
1171  */
1172 
1173 vm_offset_t
1174 pmap_extract(pmap, va)
1175 	register pmap_t	pmap;
1176 	vm_offset_t va;
1177 {
1178 	register vm_offset_t pa;
1179 
1180 #ifdef DEBUGx
1181 	if (pmapdebug & PDB_FOLLOW)
1182 		pg("pmap_extract(%x, %x) -> ", pmap, va);
1183 #endif
1184 	pa = 0;
1185 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1186 		pa = *(int *) pmap_pte(pmap, va);
1187 	}
1188 	if (pa)
1189 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1190 #ifdef DEBUGx
1191 	if (pmapdebug & PDB_FOLLOW)
1192 		printf("%x\n", pa);
1193 #endif
1194 	return(pa);
1195 }
1196 
1197 /*
1198  *	Copy the range specified by src_addr/len
1199  *	from the source map to the range dst_addr/len
1200  *	in the destination map.
1201  *
1202  *	This routine is only advisory and need not do anything.
1203  */
1204 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1205 	pmap_t		dst_pmap;
1206 	pmap_t		src_pmap;
1207 	vm_offset_t	dst_addr;
1208 	vm_size_t	len;
1209 	vm_offset_t	src_addr;
1210 {
1211 #ifdef DEBUG
1212 	if (pmapdebug & PDB_FOLLOW)
1213 		printf("pmap_copy(%x, %x, %x, %x, %x)",
1214 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1215 #endif
1216 }
1217 
1218 /*
1219  *	Require that all active physical maps contain no
1220  *	incorrect entries NOW.  [This update includes
1221  *	forcing updates of any address map caching.]
1222  *
1223  *	Generally used to insure that a thread about
1224  *	to run will see a semantically correct world.
1225  */
1226 void pmap_update()
1227 {
1228 #ifdef DEBUG
1229 	if (pmapdebug & PDB_FOLLOW)
1230 		printf("pmap_update()");
1231 #endif
1232 	tlbflush();
1233 }
1234 
1235 /*
1236  *	Routine:	pmap_collect
1237  *	Function:
1238  *		Garbage collects the physical map system for
1239  *		pages which are no longer used.
1240  *		Success need not be guaranteed -- that is, there
1241  *		may well be pages which are not referenced, but
1242  *		others may be collected.
1243  *	Usage:
1244  *		Called by the pageout daemon when pages are scarce.
1245  * [ needs to be written -wfj ]
1246  */
1247 void
1248 pmap_collect(pmap)
1249 	pmap_t		pmap;
1250 {
1251 	register vm_offset_t pa;
1252 	register pv_entry_t pv;
1253 	register int *pte;
1254 	vm_offset_t kpa;
1255 	int s;
1256 
1257 #ifdef DEBUG
1258 	int *pde;
1259 	int opmapdebug;
1260 	printf("pmap_collect(%x) ", pmap);
1261 #endif
1262 	if (pmap != kernel_pmap)
1263 		return;
1264 
1265 }
1266 
1267 /* [ macro again?, should I force kstack into user map here? -wfj ] */
1268 void
1269 pmap_activate(pmap, pcbp)
1270 	register pmap_t pmap;
1271 	struct pcb *pcbp;
1272 {
1273 int x;
1274 #ifdef DEBUG
1275 	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
1276 		pg("pmap_activate(%x, %x) ", pmap, pcbp);
1277 #endif
1278 	PMAP_ACTIVATE(pmap, pcbp);
1279 /*printf("pde ");
1280 for(x=0x3f6; x < 0x3fA; x++)
1281 	printf("%x ", pmap->pm_pdir[x]);*/
1282 /*pads(pmap);*/
1283 /*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
1284 }
1285 
1286 /*
1287  *	Routine:	pmap_kernel
1288  *	Function:
1289  *		Returns the physical map handle for the kernel.
1290  */
1291 pmap_t
1292 pmap_kernel()
1293 {
1294     	return (kernel_pmap);
1295 }
1296 
1297 /*
1298  *	pmap_zero_page zeros the specified (machine independent)
1299  *	page by mapping the page into virtual memory and using
1300  *	bzero to clear its contents, one machine dependent page
1301  *	at a time.
1302  */
1303 void
1304 pmap_zero_page(phys)
1305 	register vm_offset_t	phys;
1306 {
1307 	register int ix;
1308 
1309 #ifdef DEBUG
1310 	if (pmapdebug & PDB_FOLLOW)
1311 		printf("pmap_zero_page(%x)", phys);
1312 #endif
1313 	phys >>= PG_SHIFT;
1314 	ix = 0;
1315 	do {
1316 		clearseg(phys++);
1317 	} while (++ix != i386pagesperpage);
1318 }
1319 
1320 /*
1321  *	pmap_copy_page copies the specified (machine independent)
1322  *	page by mapping the page into virtual memory and using
1323  *	bcopy to copy the page, one machine dependent page at a
1324  *	time.
1325  */
1326 void
1327 pmap_copy_page(src, dst)
1328 	register vm_offset_t	src, dst;
1329 {
1330 	register int ix;
1331 
1332 #ifdef DEBUG
1333 	if (pmapdebug & PDB_FOLLOW)
1334 		printf("pmap_copy_page(%x, %x)", src, dst);
1335 #endif
1336 	src >>= PG_SHIFT;
1337 	dst >>= PG_SHIFT;
1338 	ix = 0;
1339 	do {
1340 		physcopyseg(src++, dst++);
1341 	} while (++ix != i386pagesperpage);
1342 }
1343 
1344 
1345 /*
1346  *	Routine:	pmap_pageable
1347  *	Function:
1348  *		Make the specified pages (by pmap, offset)
1349  *		pageable (or not) as requested.
1350  *
1351  *		A page which is not pageable may not take
1352  *		a fault; therefore, its page table entry
1353  *		must remain valid for the duration.
1354  *
1355  *		This routine is merely advisory; pmap_enter
1356  *		will specify that these pages are to be wired
1357  *		down (or not) as appropriate.
1358  */
1359 void
1360 pmap_pageable(pmap, sva, eva, pageable)
1361 	pmap_t		pmap;
1362 	vm_offset_t	sva, eva;
1363 	boolean_t	pageable;
1364 {
1365 #ifdef DEBUG
1366 	if (pmapdebug & PDB_FOLLOW)
1367 		printf("pmap_pageable(%x, %x, %x, %x)",
1368 		       pmap, sva, eva, pageable);
1369 #endif
1370 	/*
1371 	 * If we are making a PT page pageable then all valid
1372 	 * mappings must be gone from that page.  Hence it should
1373 	 * be all zeros and there is no need to clean it.
1374 	 * Assumptions:
1375 	 *	- we are called with only one page at a time
1376 	 *	- PT pages have only one pv_table entry
1377 	 */
1378 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1379 		register pv_entry_t pv;
1380 		register vm_offset_t pa;
1381 
1382 #ifdef DEBUG
1383 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1384 			printf("pmap_pageable(%x, %x, %x, %x)",
1385 			       pmap, sva, eva, pageable);
1386 #endif
1387 		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
1388 			return;*/
1389 		if(pmap_pte(pmap, sva) == 0)
1390 			return;
1391 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1392 		if (pa < vm_first_phys || pa >= vm_last_phys)
1393 			return;
1394 		pv = pa_to_pvh(pa);
1395 		/*if (!ispt(pv->pv_va))
1396 			return;*/
1397 #ifdef DEBUG
1398 		if (pv->pv_va != sva || pv->pv_next) {
1399 			pg("pmap_pageable: bad PT page va %x next %x\n",
1400 			       pv->pv_va, pv->pv_next);
1401 			return;
1402 		}
1403 #endif
1404 		/*
1405 		 * Mark it unmodified to avoid pageout
1406 		 */
1407 		pmap_clear_modify(pa);
1408 #ifdef needsomethinglikethis
1409 		if (pmapdebug & PDB_PTPAGE)
1410 			pg("pmap_pageable: PT page %x(%x) unmodified\n",
1411 			       sva, *(int *)pmap_pte(pmap, sva));
1412 		if (pmapdebug & PDB_WIRING)
1413 			pmap_check_wiring("pageable", sva);
1414 #endif
1415 	}
1416 }
1417 
1418 /*
1419  *	Clear the modify bits on the specified physical page.
1420  */
1421 
1422 void
1423 pmap_clear_modify(pa)
1424 	vm_offset_t	pa;
1425 {
1426 #ifdef DEBUG
1427 	if (pmapdebug & PDB_FOLLOW)
1428 		printf("pmap_clear_modify(%x)", pa);
1429 #endif
1430 	pmap_changebit(pa, PG_M, FALSE);
1431 }
1432 
1433 /*
1434  *	pmap_clear_reference:
1435  *
1436  *	Clear the reference bit on the specified physical page.
1437  */
1438 
1439 void pmap_clear_reference(pa)
1440 	vm_offset_t	pa;
1441 {
1442 #ifdef DEBUG
1443 	if (pmapdebug & PDB_FOLLOW)
1444 		printf("pmap_clear_reference(%x)", pa);
1445 #endif
1446 	pmap_changebit(pa, PG_U, FALSE);
1447 }
1448 
1449 /*
1450  *	pmap_is_referenced:
1451  *
1452  *	Return whether or not the specified physical page is referenced
1453  *	by any physical maps.
1454  */
1455 
1456 boolean_t
1457 pmap_is_referenced(pa)
1458 	vm_offset_t	pa;
1459 {
1460 #ifdef DEBUG
1461 	if (pmapdebug & PDB_FOLLOW) {
1462 		boolean_t rv = pmap_testbit(pa, PG_U);
1463 		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
1464 		return(rv);
1465 	}
1466 #endif
1467 	return(pmap_testbit(pa, PG_U));
1468 }
1469 
1470 /*
1471  *	pmap_is_modified:
1472  *
1473  *	Return whether or not the specified physical page is modified
1474  *	by any physical maps.
1475  */
1476 
1477 boolean_t
1478 pmap_is_modified(pa)
1479 	vm_offset_t	pa;
1480 {
1481 #ifdef DEBUG
1482 	if (pmapdebug & PDB_FOLLOW) {
1483 		boolean_t rv = pmap_testbit(pa, PG_M);
1484 		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
1485 		return(rv);
1486 	}
1487 #endif
1488 	return(pmap_testbit(pa, PG_M));
1489 }
1490 
1491 vm_offset_t
1492 pmap_phys_address(ppn)
1493 	int ppn;
1494 {
1495 	return(i386_ptob(ppn));
1496 }
1497 
1498 /*
1499  * Miscellaneous support routines follow
1500  */
1501 
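/*
 * Build the protection_codes[] table used by pte_prot(): the index is a
 * VM_PROT_{READ,WRITE,EXECUTE} combination and the value is the matching
 * i386 PTE protection.  Since the 386 has no separate execute permission,
 * every combination collapses to no access, PG_RO or PG_RW.
 */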
1502 i386_protection_init()
1503 {
1504 	register int *kp, prot;
1505 
1506 	kp = protection_codes;
1507 	for (prot = 0; prot < 8; prot++) {
1508 		switch (prot) {
1509 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1510 			*kp++ = 0;
1511 			break;
1512 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1513 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1514 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1515 			*kp++ = PG_RO;
1516 			break;
1517 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1518 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1519 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1520 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1521 			*kp++ = PG_RW;
1522 			break;
1523 		}
1524 	}
1525 }
1526 
1527 static
1528 boolean_t
1529 pmap_testbit(pa, bit)
1530 	register vm_offset_t pa;
1531 	int bit;
1532 {
1533 	register pv_entry_t pv;
1534 	register int *pte, ix;
1535 	int s;
1536 
1537 	if (pa < vm_first_phys || pa >= vm_last_phys)
1538 		return(FALSE);
1539 
1540 	pv = pa_to_pvh(pa);
1541 	s = splimp();
1542 	/*
1543 	 * Check saved info first
1544 	 */
1545 	if (pmap_attributes[pa_index(pa)] & bit) {
1546 		splx(s);
1547 		return(TRUE);
1548 	}
1549 	/*
1550 	 * Not found, check current mappings returning
1551 	 * immediately if found.
1552 	 */
1553 	if (pv->pv_pmap != NULL) {
1554 		for (; pv; pv = pv->pv_next) {
1555 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1556 			ix = 0;
1557 			do {
1558 				if (*pte++ & bit) {
1559 					splx(s);
1560 					return(TRUE);
1561 				}
1562 			} while (++ix != i386pagesperpage);
1563 		}
1564 	}
1565 	splx(s);
1566 	return(FALSE);
1567 }
1568 
1569 pmap_changebit(pa, bit, setem)
1570 	register vm_offset_t pa;
1571 	int bit;
1572 	boolean_t setem;
1573 {
1574 	register pv_entry_t pv;
1575 	register int *pte, npte, ix;
1576 	vm_offset_t va;
1577 	int s;
1578 	boolean_t firstpage = TRUE;
1579 
1580 #ifdef DEBUG
1581 	if (pmapdebug & PDB_BITS)
1582 		printf("pmap_changebit(%x, %x, %s)",
1583 		       pa, bit, setem ? "set" : "clear");
1584 #endif
1585 	if (pa < vm_first_phys || pa >= vm_last_phys)
1586 		return;
1587 
1588 	pv = pa_to_pvh(pa);
1589 	s = splimp();
1590 	/*
1591 	 * Clear saved attributes (modify, reference)
1592 	 */
1593 	if (!setem)
1594 		pmap_attributes[pa_index(pa)] &= ~bit;
1595 	/*
1596 	 * Loop over all current mappings setting/clearing as appropriate.
1597 	 * If setting RO do we need to clear the VAC?
1598 	 */
1599 	if (pv->pv_pmap != NULL) {
1600 #ifdef DEBUG
1601 		int toflush = 0;
1602 #endif
1603 		for (; pv; pv = pv->pv_next) {
1604 #ifdef DEBUG
1605 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1606 #endif
1607 			va = pv->pv_va;
1608 
1609 			/*
1610 			 * XXX don't write protect pager mappings
1611 			 */
1612 			if (bit == PG_RO) {
1613 				extern vm_offset_t pager_sva, pager_eva;
1614 
1615 				if (va >= pager_sva && va < pager_eva)
1616 					continue;
1617 			}
1618 
1619 			pte = (int *) pmap_pte(pv->pv_pmap, va);
1620 			ix = 0;
1621 			do {
1622 				if (setem)
1623 					npte = *pte | bit;
1624 				else
1625 					npte = *pte & ~bit;
1626 				if (*pte != npte) {
1627 					*pte = npte;
1628 					/*TBIS(va);*/
1629 				}
1630 				va += I386_PAGE_SIZE;
1631 				pte++;
1632 			} while (++ix != i386pagesperpage);
1633 
1634 			if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
1635 				pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
1636 		}
1637 #ifdef somethinglikethis
1638 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1639 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1640 				DCIA();
1641 			else if (toflush == 2)
1642 				DCIS();
1643 			else
1644 				DCIU();
1645 		}
1646 #endif
1647 	}
1648 	splx(s);
1649 }
1650 
1651 #ifdef DEBUG
1652 pmap_pvdump(pa)
1653 	vm_offset_t pa;
1654 {
1655 	register pv_entry_t pv;
1656 
1657 	printf("pa %x", pa);
1658 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1659 		printf(" -> pmap %x, va %x, flags %x",
1660 		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
1661 		pads(pv->pv_pmap);
1662 	}
1663 	printf(" ");
1664 }
1665 
1666 #ifdef notyet
1667 pmap_check_wiring(str, va)
1668 	char *str;
1669 	vm_offset_t va;
1670 {
1671 	vm_map_entry_t entry;
1672 	register int count, *pte;
1673 
1674 	va = trunc_page(va);
1675 	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
1676 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
1677 		return;
1678 
1679 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
1680 		pg("wired_check: entry for %x not found\n", va);
1681 		return;
1682 	}
1683 	count = 0;
1684 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
1685 		if (*pte)
1686 			count++;
1687 	if (entry->wired_count != count)
1688 		pg("*%s*: %x: w%d/a%d\n",
1689 		       str, va, entry->wired_count, count);
1690 }
1691 #endif
1692 
1693 /* print address space of pmap*/
1694 pads(pm) pmap_t pm; {
1695 	unsigned va, i, j;
1696 	struct pte *ptep;
1697 
1698 	if(pm == kernel_pmap) return;
1699 	for (i = 0; i < 1024; i++)
1700 		if(pm->pm_pdir[i].pd_v)
1701 			for (j = 0; j < 1024 ; j++) {
1702 				va = (i<<22)+(j<<12);
1703 				if (pm == kernel_pmap && va < 0xfe000000)
1704 						continue;
1705 				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
1706 						continue;
1707 				ptep = pmap_pte(pm, va);
1708 				if(pmap_pte_v(ptep))
1709 					printf("%x:%x ", va, *(int *)ptep);
1710 			} ;
1711 
1712 }
1713 #endif
1714