xref: /original-bsd/sys/i386/i386/pmap.c (revision 753853ba)
1 /*-
2  * Copyright (c) 1991 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department and William Jolitz of UUNET Technologies Inc.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	7.10 (Berkeley) 02/19/92
12  */
13 
14 /*
15  * Derived from the hp300 version by Mike Hibler, this version by William
16  * Jolitz uses a recursive map [a pde points to the page directory] to
17  * map the page tables using the page tables themselves. This is done to
18  * reduce the kernel virtual memory overhead of large, sparse address
19  * spaces, and to reduce the per-process cost of page table memory.
20  *
21  *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
22  */
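/*
 * A minimal sketch of the arithmetic the recursive map implies, for
 * reference only: with a self-referential pde installed, the page
 * tables of the current address space appear as one linear array of
 * ptes in the 4Mb window selected by that slot, so the pte for any
 * virtual address is found by indexing with the virtual page number.
 * The EX_* names and the slot value are assumptions for illustration,
 * not taken from the kernel headers.
 */
#ifdef notdef
#define EX_PG_SHIFT     12                      /* assumed 4K pages */
#define EX_PTDPTDI      0x3f6                   /* assumed recursive pd slot */
#define EX_PTMAP        ((unsigned *)((unsigned)EX_PTDPTDI << 22))

static unsigned *
example_vtopte(unsigned va)
{
        /* index the linear pte array by virtual page number */
        return (EX_PTMAP + (va >> EX_PG_SHIFT));
}
#endif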
23 
24 /*
25  *	Reno i386 version, from Mike Hibler's hp300 version.
26  */
27 
28 /*
29  *	Manages physical address maps.
30  *
31  *	In addition to hardware address maps, this
32  *	module is called upon to provide software-use-only
33  *	maps which may or may not be stored in the same
34  *	form as hardware maps.  These pseudo-maps are
35  *	used to store intermediate results from copy
36  *	operations to and from address spaces.
37  *
38  *	Since the information managed by this module is
39  *	also stored by the logical address mapping module,
40  *	this module may throw away valid virtual-to-physical
41  *	mappings at almost any time.  However, invalidations
42  *	of virtual-to-physical mappings must be done as
43  *	requested.
44  *
45  *	In order to cope with hardware architectures which
46  *	make virtual-to-physical map invalidates expensive,
47  * this module may delay invalidate or reduce-protection
48  *	operations until such time as they are actually
49  *	necessary.  This module is given full information as
50  *	to which processors are currently using which maps,
51  *	and to when physical maps must be made correct.
52  */
53 
54 #include "param.h"
55 #include "proc.h"
56 #include "malloc.h"
57 #include "user.h"
58 
59 #include "vm/vm.h"
60 #include "vm/vm_kern.h"
61 #include "vm/vm_page.h"
62 /*#include "vm/vm_pageout.h"*/
63 
64 /*#include "machine/isa.h"*/
65 
66 /*
67  * Allocate various and sundry SYSMAPs used in the days of old VM
68  * and not yet converted.  XXX.
69  */
70 #define BSDVM_COMPAT	1
71 
72 #ifdef DEBUG
73 struct {
74 	int kernel;	/* entering kernel mapping */
75 	int user;	/* entering user mapping */
76 	int ptpneeded;	/* needed to allocate a PT page */
77 	int pwchange;	/* no mapping change, just wiring or protection */
78 	int wchange;	/* no mapping change, just wiring */
79 	int mchange;	/* was mapped but mapping to different page */
80 	int managed;	/* a managed page */
81 	int firstpv;	/* first mapping for this PA */
82 	int secondpv;	/* second mapping for this PA */
83 	int ci;		/* cache inhibited */
84 	int unmanaged;	/* not a managed page */
85 	int flushes;	/* cache flushes */
86 } enter_stats;
87 struct {
88 	int calls;
89 	int removes;
90 	int pvfirst;
91 	int pvsearch;
92 	int ptinvalid;
93 	int uflushes;
94 	int sflushes;
95 } remove_stats;
96 
97 int debugmap = 0;
98 int pmapdebug = 0;
99 #define PDB_FOLLOW	0x0001
100 #define PDB_INIT	0x0002
101 #define PDB_ENTER	0x0004
102 #define PDB_REMOVE	0x0008
103 #define PDB_CREATE	0x0010
104 #define PDB_PTPAGE	0x0020
105 #define PDB_CACHE	0x0040
106 #define PDB_BITS	0x0080
107 #define PDB_COLLECT	0x0100
108 #define PDB_PROTECT	0x0200
109 #define PDB_PDRTAB	0x0400
110 #define PDB_PARANOIA	0x2000
111 #define PDB_WIRING	0x4000
112 #define PDB_PVDUMP	0x8000
113 
114 int pmapvacflush = 0;
115 #define	PVF_ENTER	0x01
116 #define	PVF_REMOVE	0x02
117 #define	PVF_PROTECT	0x04
118 #define	PVF_TOTAL	0x80
119 #endif
120 
121 /*
122  * Get PDEs and PTEs for user/kernel address space
123  */
124 #define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
125 
126 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
127 
128 #define pmap_pde_v(pte)		((pte)->pd_v)
129 #define pmap_pte_w(pte)		((pte)->pg_w)
130 /* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
131 #define pmap_pte_m(pte)		((pte)->pg_m)
132 #define pmap_pte_u(pte)		((pte)->pg_u)
133 #define pmap_pte_v(pte)		((pte)->pg_v)
134 #define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
135 #define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
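/*
 * For reference, a sketch of the address split the macros above rely
 * on, assuming PD_SHIFT is 22 and PG_SHIFT is 12 (assumptions for
 * illustration): a 32-bit virtual address is a 10-bit page directory
 * index, a 10-bit page table index and a 12-bit byte offset.
 */
#ifdef notdef
static void
example_va_split(unsigned va, unsigned *pdi, unsigned *pti, unsigned *off)
{
        *pdi = (va >> 22) & 1023;       /* page directory index (pmap_pde) */
        *pti = (va >> 12) & 1023;       /* page table index */
        *off = va & 0xfff;              /* byte offset within the page */
}
#endif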
136 
137 /*
138  * Given a map and a machine independent protection code,
139  * convert to an i386 protection code.
140  */
141 #define pte_prot(m, p)	(protection_codes[p])
142 int	protection_codes[8];
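/*
 * For example, once i386_protection_init() below has filled the table,
 * pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE) yields PG_RW, any
 * read-only or execute-only combination yields PG_RO, and VM_PROT_NONE
 * yields 0.
 */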
143 
144 struct pmap	kernel_pmap_store;
145 
146 vm_offset_t    	avail_start;	/* PA of first available physical page */
147 vm_offset_t	avail_end;	/* PA of last available physical page */
148 vm_size_t	mem_size;	/* memory size in bytes */
149 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
150 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
151 vm_offset_t	vm_first_phys;	/* PA of first managed page */
152 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
153 int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
154 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
155 char		*pmap_attributes;	/* reference and modify bits */
156 
157 boolean_t	pmap_testbit();
158 void		pmap_clear_modify();
159 
160 #if BSDVM_COMPAT
161 #include "msgbuf.h"
162 
163 /*
164  * All those kernel PT submaps that BSD is so fond of
165  */
166 struct pte	*CMAP1, *CMAP2, *mmap;
167 caddr_t		CADDR1, CADDR2, vmmap;
168 struct pte	*msgbufmap;
169 struct msgbuf	*msgbufp;
170 #endif
171 
172 void pmap_activate __P((pmap_t, struct pcb *));
173 
174 /*
175  *	Bootstrap the system enough to run with virtual memory.
176  *	Map the kernel's code and data, and allocate the system page table.
177  *
178  *	On the I386 this is called after mapping has already been enabled
179  *	and just syncs the pmap module with what has already been done.
180  *	[We can't call it easily with mapping off since the kernel is not
181  *	mapped with PA == VA, hence we would have to relocate every address
182  *	from the linked base (virtual) address 0xFE000000 to the actual
183  *	(physical) address starting relative to 0]
184  */
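/*
 * A sketch of the relocation the note above refers to, assuming the
 * kernel is linked at virtual 0xFE000000 and loaded at physical 0;
 * EX_KERNBASE and example_early_vtophys() are illustrative names only.
 */
#ifdef notdef
#define EX_KERNBASE     0xfe000000      /* assumed link-time virtual base */

static unsigned
example_early_vtophys(unsigned va)
{
        /* with mapping off, a linked address maps to base-relative physical */
        return (va - EX_KERNBASE);
}
#endif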
185 struct pte *pmap_pte();
186 
187 extern vm_offset_t	atdevbase;
188 void
189 pmap_bootstrap(firstaddr, loadaddr)
190 	vm_offset_t firstaddr;
191 	vm_offset_t loadaddr;
192 {
193 #if BSDVM_COMPAT
194 	vm_offset_t va;
195 	struct pte *pte;
196 #endif
197 	extern vm_offset_t maxmem, physmem;
198 	extern int		IdlePTD;
199 
200 #if	defined(ODYSSEUS) || defined(ARGO) || defined(CIRCE)
201 firstaddr=0x100000;	/* for some reason, basemem screws up on this machine */
202 #endif
203 printf("ps %x pe %x ", firstaddr, maxmem <<PG_SHIFT);
204 	avail_start = firstaddr;
205 	avail_end = maxmem << PG_SHIFT;
206 
207 	/* XXX: allow for msgbuf */
208 	avail_end -= i386_round_page(sizeof(struct msgbuf));
209 
210 	mem_size = physmem << PG_SHIFT;
211 	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
212 	virtual_end = VM_MAX_KERNEL_ADDRESS;
213 	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
214 
215 	/*
216 	 * Initialize protection array.
217 	 */
218 	i386_protection_init();
219 
220 #ifdef notdef
221 	/*
222 	 * Create Kernel page directory table and page maps.
223 	 * [ currently done in locore. i have wild and crazy ideas -wfj ]
224 	 */
225 	bzero(firstaddr, 4*NBPG);
226 	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
227 	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
228 
229 	firstaddr += NBPG;
230 	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
231 		x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
232 			struct pde *pde;
233 		pde = kernel_pmap->pm_pdir + x;
234 		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
235 	}
236 #else
237 	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
238 #endif
239 
240 
241 	simple_lock_init(&kernel_pmap->pm_lock);
242 	kernel_pmap->pm_count = 1;
243 
244 #if BSDVM_COMPAT
245 	/*
246 	 * Allocate all the submaps we need
247 	 */
248 #define	SYSMAP(c, p, v, n)	\
249 	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
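/*
 * For reference, the first invocation below,
 *	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 * expands (per the definition above) to
 *	CADDR1 = (caddr_t)va; va += ((1)*I386_PAGE_SIZE); CMAP1 = pte; pte += (1);
 * i.e. each submap claims n pages of kernel VA and remembers the ptes
 * backing them so the pages can be remapped on demand later.
 */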
250 
251 	va = virtual_avail;
252 	pte = pmap_pte(kernel_pmap, va);
253 
254 	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
255 	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
256 	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
257 	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
258 	virtual_avail = va;
259 #endif
260 
261 	/**(int *)PTD = 0;
262 	load_cr3(rcr3());*/
263 
264 }
265 
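/*
 * Return true if addr is usable by the bootstrap allocator, i.e. it
 * does not fall in the 640K-1Mb hole (0xa0000-0x100000) reserved for
 * ISA video and BIOS memory.
 */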
266 pmap_isvalidphys(addr) {
267 	if (addr < 0xa0000) return (1);
268 	if (addr >= 0x100000) return (1);
269 	return(0);
270 }
271 
272 /*
273  * Bootstrap memory allocator. This function allows for early dynamic
274  * memory allocation until the virtual memory system has been bootstrapped.
275  * After that point, either kmem_alloc or malloc should be used. This
276  * function works by stealing pages from the (to be) managed page pool,
277  * stealing virtual address space, then mapping the pages and zeroing them.
278  *
279  * It should be used from pmap_bootstrap till vm_page_startup, afterwards
280  * it cannot be used, and will generate a panic if tried. Note that this
281  * memory will never be freed, and in essence it is wired down.
282  */
283 void *
284 pmap_bootstrap_alloc(size) {
285 	vm_offset_t val;
286 	int i;
287 	extern boolean_t vm_page_startup_initialized;
288 
289 	if (vm_page_startup_initialized)
290 		panic("pmap_bootstrap_alloc: called after startup initialized");
291 	size = round_page(size);
292 	val = virtual_avail;
293 
294 	/* deal with "hole incursion" */
295 	for (i = 0; i < size; i += PAGE_SIZE) {
296 
297 		while (!pmap_isvalidphys(avail_start))
298 				avail_start += PAGE_SIZE;
299 
300 		virtual_avail = pmap_map(virtual_avail, avail_start,
301 			avail_start + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
302 		avail_start += PAGE_SIZE;
303 	}
304 
305 	blkclr ((caddr_t) val, size);
306 	return ((void *) val);
307 }
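/*
 * Hypothetical usage sketch (illustrative names, not an actual caller
 * in this file): an early machine-dependent table could be obtained as
 *
 *	tbl = (struct foo *)pmap_bootstrap_alloc(ntbl * sizeof(struct foo));
 *
 * and comes back mapped, zeroed and permanently wired.
 */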
308 
309 /*
310  *	Initialize the pmap module.
311  *	Called by vm_init, to initialize any structures that the pmap
312  *	system needs to map virtual memory.
313  */
314 void
315 pmap_init(phys_start, phys_end)
316 	vm_offset_t	phys_start, phys_end;
317 {
318 	vm_offset_t	addr, addr2;
319 	vm_size_t	npg, s;
320 	int		rv;
321 	extern int KPTphys;
322 
323 #ifdef DEBUG
324 	if (pmapdebug & PDB_FOLLOW)
325 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
326 #endif
327 	/*
328 	 * Now that kernel map has been allocated, we can mark as
329 	 * unavailable regions which we have mapped in locore.
330 	 */
331 	addr = atdevbase;
332 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
333 			   &addr, (0x100000-0xa0000), FALSE);
334 
335 	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
336 	vm_object_reference(kernel_object);
337 	(void) vm_map_find(kernel_map, kernel_object, addr,
338 			   &addr, 2*NBPG, FALSE);
339 
340 	/*
341 	 * Allocate memory for random pmap data structures.  Includes the
342 	 * pv_head_table and pmap_attributes.
343 	 */
344 	npg = atop(phys_end - phys_start);
345 	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
346 	s = round_page(s);
347 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
348 	pv_table = (pv_entry_t) addr;
349 	addr += sizeof(struct pv_entry) * npg;
350 	pmap_attributes = (char *) addr;
351 #ifdef DEBUG
352 	if (pmapdebug & PDB_INIT)
353 		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
354 		       s, npg, pv_table, pmap_attributes);
355 #endif
356 
357 	/*
358 	 * Now it is safe to enable pv_table recording.
359 	 */
360 	vm_first_phys = phys_start;
361 	vm_last_phys = phys_end;
362 	pmap_initialized = TRUE;
363 }
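/*
 * A sketch of how the two arrays set up above are indexed, assuming
 * pa_index() is the page number of pa relative to vm_first_phys and
 * atop() converts bytes to pages (both assumptions; the real macros
 * live in the pmap header).  The example_* names are illustrative.
 */
#ifdef notdef
static pv_entry_t
example_pa_to_pvh(vm_offset_t pa)
{
        return (&pv_table[atop(pa - vm_first_phys)]);
}

static char *
example_pa_to_attr(vm_offset_t pa)
{
        return (&pmap_attributes[atop(pa - vm_first_phys)]);
}
#endif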
364 
365 /*
366  *	Used to map a range of physical addresses into kernel
367  *	virtual address space.
368  *
369  *	For now, VM is already on, we only need to map the
370  *	specified memory.
371  */
372 vm_offset_t
373 pmap_map(virt, start, end, prot)
374 	vm_offset_t	virt;
375 	vm_offset_t	start;
376 	vm_offset_t	end;
377 	int		prot;
378 {
379 #ifdef DEBUG
380 	if (pmapdebug & PDB_FOLLOW)
381 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
382 #endif
383 	while (start < end) {
384 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
385 		virt += PAGE_SIZE;
386 		start += PAGE_SIZE;
387 	}
388 	return(virt);
389 }
390 
391 /*
392  *	Create and return a physical map.
393  *
394  *	If the size specified for the map
395  *	is zero, the map is an actual physical
396  *	map, and may be referenced by the
397  *	hardware.
398  *
399  *	If the size specified is non-zero,
400  *	the map will be used in software only, and
401  *	is bounded by that size.
402  *
403  * [ just allocate a ptd and mark it uninitialized -- should we track
404  *   with a table which process has which ptd? -wfj ]
405  */
406 
407 pmap_t
408 pmap_create(size)
409 	vm_size_t	size;
410 {
411 	register pmap_t pmap;
412 
413 #ifdef DEBUG
414 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
415 		printf("pmap_create(%x)\n", size);
416 #endif
417 	/*
418 	 * Software use map does not need a pmap
419 	 * A software-only map does not need a pmap
420 	if (size)
421 		return(NULL);
422 
423 	/* XXX: is it ok to wait here? */
424 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
425 #ifdef notifwewait
426 	if (pmap == NULL)
427 		panic("pmap_create: cannot allocate a pmap");
428 #endif
429 	bzero(pmap, sizeof(*pmap));
430 	pmap_pinit(pmap);
431 	return (pmap);
432 }
433 
434 /*
435  * Initialize a preallocated and zeroed pmap structure,
436  * such as one in a vmspace structure.
437  */
438 void
439 pmap_pinit(pmap)
440 	register struct pmap *pmap;
441 {
442 
443 #ifdef DEBUG
444 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
445 		pg("pmap_pinit(%x)\n", pmap);
446 #endif
447 
448 	/*
449 	 * No need to allocate page table space yet but we do need a
450 	 * valid page directory table.
451 	 */
452 	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
453 
454 	/* wire in kernel global address entries */
455 	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
456 		(KPTDI_LAST-KPTDI_FIRST+1)*4);
457 
458 	/* install self-referential address mapping entry */
459 	*(int *)(pmap->pm_pdir+PTDPTDI) =
460 		(int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_URKW;
461 
462 	pmap->pm_count = 1;
463 	simple_lock_init(&pmap->pm_lock);
464 }
465 
466 /*
467  *	Retire the given physical map from service.
468  *	Should only be called if the map contains
469  *	no valid mappings.
470  */
471 void
472 pmap_destroy(pmap)
473 	register pmap_t pmap;
474 {
475 	int count;
476 
477 #ifdef DEBUG
478 	if (pmapdebug & PDB_FOLLOW)
479 		printf("pmap_destroy(%x)\n", pmap);
480 #endif
481 	if (pmap == NULL)
482 		return;
483 
484 	simple_lock(&pmap->pm_lock);
485 	count = --pmap->pm_count;
486 	simple_unlock(&pmap->pm_lock);
487 	if (count == 0) {
488 		pmap_release(pmap);
489 		free((caddr_t)pmap, M_VMPMAP);
490 	}
491 }
492 
493 /*
494  * Release any resources held by the given physical map.
495  * Called when a pmap initialized by pmap_pinit is being released.
496  * Should only be called if the map contains no valid mappings.
497  */
498 void
499 pmap_release(pmap)
500 	register struct pmap *pmap;
501 {
502 
503 #ifdef DEBUG
504 	if (pmapdebug & PDB_FOLLOW)
505 		pg("pmap_release(%x)\n", pmap);
506 #endif
507 #ifdef notdef /* DIAGNOSTIC */
508 	/* count would be 0 from pmap_destroy... */
509 	simple_lock(&pmap->pm_lock);
510 	if (pmap->pm_count != 1)
511 		panic("pmap_release count");
512 #endif
513 	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
514 }
515 
516 /*
517  *	Add a reference to the specified pmap.
518  */
519 void
520 pmap_reference(pmap)
521 	pmap_t	pmap;
522 {
523 #ifdef DEBUG
524 	if (pmapdebug & PDB_FOLLOW)
525 		pg("pmap_reference(%x)", pmap);
526 #endif
527 	if (pmap != NULL) {
528 		simple_lock(&pmap->pm_lock);
529 		pmap->pm_count++;
530 		simple_unlock(&pmap->pm_lock);
531 	}
532 }
533 
534 /*
535  *	Remove the given range of addresses from the specified map.
536  *
537  *	It is assumed that the start and end are properly
538  *	rounded to the page size.
539  */
540 void
541 pmap_remove(pmap, sva, eva)
542 	register struct pmap *pmap;
543 	vm_offset_t sva, eva;
544 {
545 	register vm_offset_t pa, va;
546 	register pt_entry_t *pte;
547 	register pv_entry_t pv, npv;
548 	register int ix;
549 	pmap_t ptpmap;
550 	int *pde, s, bits;
551 	boolean_t firstpage = TRUE;
552 	boolean_t flushcache = FALSE;
553 #ifdef DEBUG
554 	pt_entry_t opte;
555 
556 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
557 		printf("pmap_remove(%x, %x, %x)", pmap, sva, eva);
558 	if (eva >= USRSTACK && eva <= UPT_MAX_ADDRESS)
559 		nullop();
560 #endif
561 
562 	if (pmap == NULL)
563 		return;
564 
565 #ifdef DEBUG
566 	remove_stats.calls++;
567 #endif
568 	for (va = sva; va < eva; va += PAGE_SIZE) {
569 		/*
570 		 * Weed out invalid mappings.
571 		 * Note: we assume that the page directory table is
572 		 * always allocated, and in kernel virtual.
573 		 */
574 		if (!pmap_pde_v(pmap_pde(pmap, va)))
575 			continue;
576 
577 		pte = pmap_pte(pmap, va);
578 		if (pte == 0)
579 			continue;
580 		pa = pmap_pte_pa(pte);
581 		if (pa == 0)
582 			continue;
583 #ifdef DEBUG
584 		opte = *pte;
585 		remove_stats.removes++;
586 #endif
587 		/*
588 		 * Update statistics
589 		 */
590 		if (pmap_pte_w(pte))
591 			pmap->pm_stats.wired_count--;
592 		pmap->pm_stats.resident_count--;
593 
594 		/*
595 		 * Invalidate the PTEs.
596 		 * XXX: should cluster them up and invalidate as many
597 		 * as possible at once.
598 		 */
599 #ifdef DEBUG
600 		if (pmapdebug & PDB_REMOVE)
601 			printf("remove: inv %x ptes at %x(%x) ",
602 			       i386pagesperpage, pte, *(int *)pte);
603 #endif
604 		bits = ix = 0;
605 		do {
606 			bits |= *(int *)pte & (PG_U|PG_M);
607 			*(int *)pte++ = 0;
608 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
609 		} while (++ix != i386pagesperpage);
610 		if (pmap == &curproc->p_vmspace->vm_pmap)
611 			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
612 		/* are we current address space or kernel? */
613 		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
614 			|| pmap == kernel_pmap)
615 		load_cr3(curpcb->pcb_ptd);*/
616 		tlbflush();
617 
618 #ifdef needednotdone
619 reduce wiring count on page table pages as references drop
620 #endif
621 
622 		/*
623 		 * Remove from the PV table (raise IPL since we
624 		 * may be called at interrupt time).
625 		 */
626 		if (pa < vm_first_phys || pa >= vm_last_phys)
627 			continue;
628 		pv = pa_to_pvh(pa);
629 		s = splimp();
630 		/*
631 		 * If it is the first entry on the list, it is actually
632 		 * in the header and we must copy the following entry up
633 		 * to the header.  Otherwise we must search the list for
634 		 * the entry.  In either case we free the now unused entry.
635 		 */
636 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
637 			npv = pv->pv_next;
638 			if (npv) {
639 				*pv = *npv;
640 				free((caddr_t)npv, M_VMPVENT);
641 			} else
642 				pv->pv_pmap = NULL;
643 #ifdef DEBUG
644 			remove_stats.pvfirst++;
645 #endif
646 		} else {
647 			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
648 #ifdef DEBUG
649 				remove_stats.pvsearch++;
650 #endif
651 				if (pmap == npv->pv_pmap && va == npv->pv_va)
652 					break;
653 				pv = npv;
654 			}
655 #ifdef DEBUG
656 			if (npv == NULL)
657 				panic("pmap_remove: PA not in pv_tab");
658 #endif
659 			pv->pv_next = npv->pv_next;
660 			free((caddr_t)npv, M_VMPVENT);
661 			pv = pa_to_pvh(pa);
662 		}
663 
664 #ifdef notdef
665 [tally number of pagetable pages, if sharing of ptpages adjust here]
666 #endif
667 		/*
668 		 * Update saved attributes for managed page
669 		 */
670 		pmap_attributes[pa_index(pa)] |= bits;
671 		splx(s);
672 	}
673 #ifdef notdef
674 [cache and tlb flushing, if needed]
675 #endif
676 }
677 
678 /*
679  *	Routine:	pmap_remove_all
680  *	Function:
681  *		Removes this physical page from
682  *		all physical maps in which it resides.
683  *		Reflects back modify bits to the pager.
684  */
685 void
686 pmap_remove_all(pa)
687 	vm_offset_t pa;
688 {
689 	register pv_entry_t pv;
690 	int s;
691 
692 #ifdef DEBUG
693 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
694 		printf("pmap_remove_all(%x)", pa);
695 	/*pmap_pvdump(pa);*/
696 #endif
697 	/*
698 	 * Not one of ours
699 	 */
700 	if (pa < vm_first_phys || pa >= vm_last_phys)
701 		return;
702 
703 	pv = pa_to_pvh(pa);
704 	s = splimp();
705 	/*
706 	 * Do it the easy way for now
707 	 */
708 	while (pv->pv_pmap != NULL) {
709 #ifdef DEBUG
710 		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
711 		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
712 			panic("pmap_remove_all: bad mapping");
713 #endif
714 		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
715 	}
716 	splx(s);
717 }
718 
719 /*
720  *	Routine:	pmap_copy_on_write
721  *	Function:
722  *		Remove write privileges from all
723  *		physical maps for this physical page.
724  */
725 void
726 pmap_copy_on_write(pa)
727 	vm_offset_t pa;
728 {
729 #ifdef DEBUG
730 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
731 		printf("pmap_copy_on_write(%x)", pa);
732 #endif
733 	pmap_changebit(pa, PG_RO, TRUE);
734 }
735 
736 /*
737  *	Set the physical protection on the
738  *	specified range of this map as requested.
739  */
740 void
741 pmap_protect(pmap, sva, eva, prot)
742 	register pmap_t	pmap;
743 	vm_offset_t	sva, eva;
744 	vm_prot_t	prot;
745 {
746 	register pt_entry_t *pte;
747 	register vm_offset_t va;
748 	register int ix;
749 	int i386prot;
750 	boolean_t firstpage = TRUE;
751 
752 #ifdef DEBUG
753 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
754 		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
755 #endif
756 	if (pmap == NULL)
757 		return;
758 
759 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
760 		pmap_remove(pmap, sva, eva);
761 		return;
762 	}
763 	if (prot & VM_PROT_WRITE)
764 		return;
765 
766 	for (va = sva; va < eva; va += PAGE_SIZE) {
767 		/*
768 		 * Page table page is not allocated.
769 		 * Skip it, we don't want to force allocation
770 		 * of unnecessary PTE pages just to set the protection.
771 		 */
772 		if (!pmap_pde_v(pmap_pde(pmap, va))) {
773 			/* XXX: avoid address wrap around */
774 			if (va >= i386_trunc_pdr((vm_offset_t)-1))
775 				break;
776 			va = i386_round_pdr(va + PAGE_SIZE);
777 			continue;
778 		} else	pte = pmap_pte(pmap, va);
779 
780 		/*
781 		 * Page not valid.  Again, skip it.
782 		 * Should we do this?  Or set protection anyway?
783 		 */
784 		if (!pmap_pte_v(pte))
785 			continue;
786 
787 		ix = 0;
788 		i386prot = pte_prot(pmap, prot);
789 		if(va < UPT_MAX_ADDRESS)
790 			i386prot |= 2 /*PG_u*/;
791 		do {
792 			/* clear VAC here if PG_RO? */
793 			pmap_pte_set_prot(pte++, i386prot);
794 			/*TBIS(va + ix * I386_PAGE_SIZE);*/
795 		} while (++ix != i386pagesperpage);
796 	}
797 out:
798 	if (pmap == &curproc->p_vmspace->vm_pmap)
799 		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
800 }
801 
802 /*
803  *	Insert the given physical page (p) at
804  *	the specified virtual address (v) in the
805  *	target physical map with the protection requested.
806  *
807  *	If specified, the page will be wired down, meaning
808  *	that the related pte can not be reclaimed.
809  *
810  *	NB:  This is the only routine which MAY NOT lazy-evaluate
811  *	or lose information.  That is, this routine must actually
812  *	insert this page into the given map NOW.
813  */
814 void
815 pmap_enter(pmap, va, pa, prot, wired)
816 	register pmap_t pmap;
817 	vm_offset_t va;
818 	register vm_offset_t pa;
819 	vm_prot_t prot;
820 	boolean_t wired;
821 {
822 	register pt_entry_t *pte;
823 	register int npte, ix;
824 	vm_offset_t opa;
825 	boolean_t cacheable = TRUE;
826 	boolean_t checkpv = TRUE;
827 
828 #ifdef DEBUG
829 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
830 		printf("pmap_enter(%x, %x, %x, %x, %x)",
831 		       pmap, va, pa, prot, wired);
832 	if(!pmap_isvalidphys(pa)) panic("invalid phys");
833 #endif
834 	if (pmap == NULL)
835 		return;
836 
837 	if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig");
838 	/* also, should not muck with PTD va! */
839 
840 #ifdef DEBUG
841 	if (pmap == kernel_pmap)
842 		enter_stats.kernel++;
843 	else
844 		enter_stats.user++;
845 #endif
846 
847 	/*
848 	 * Page Directory table entry not valid, we need a new PT page
849 	 */
850 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
851 		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
852 	}
853 
854 	pte = pmap_pte(pmap, va);
855 	opa = pmap_pte_pa(pte);
856 #ifdef DEBUG
857 	if (pmapdebug & PDB_ENTER)
858 		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
859 #endif
860 
861 	/*
862 	 * Mapping has not changed, must be protection or wiring change.
863 	 */
864 	if (opa == pa) {
865 #ifdef DEBUG
866 		enter_stats.pwchange++;
867 #endif
868 		/*
869 		 * Wiring change, just update stats.
870 		 * We don't worry about wiring PT pages as they remain
871 		 * resident as long as there are valid mappings in them.
872 		 * Hence, if a user page is wired, the PT page will be also.
873 		 */
874 		if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
875 #ifdef DEBUG
876 			if (pmapdebug & PDB_ENTER)
877 				pg("enter: wiring change -> %x ", wired);
878 #endif
879 			if (wired)
880 				pmap->pm_stats.wired_count++;
881 			else
882 				pmap->pm_stats.wired_count--;
883 #ifdef DEBUG
884 			enter_stats.wchange++;
885 #endif
886 		}
887 		goto validate;
888 	}
889 
890 	/*
891 	 * Mapping has changed, invalidate old range and fall through to
892 	 * handle validating new mapping.
893 	 */
894 	if (opa) {
895 #ifdef DEBUG
896 		if (pmapdebug & PDB_ENTER)
897 			printf("enter: removing old mapping %x pa %x ", va, opa);
898 #endif
899 		pmap_remove(pmap, va, va + PAGE_SIZE);
900 #ifdef DEBUG
901 		enter_stats.mchange++;
902 #endif
903 	}
904 
905 	/*
906 	 * Enter on the PV list if part of our managed memory
907 	 * Note that we raise IPL while manipulating pv_table
908 	 * since pmap_enter can be called at interrupt time.
909 	 */
910 	if (pa >= vm_first_phys && pa < vm_last_phys) {
911 		register pv_entry_t pv, npv;
912 		int s;
913 
914 #ifdef DEBUG
915 		enter_stats.managed++;
916 #endif
917 		pv = pa_to_pvh(pa);
918 		s = splimp();
919 #ifdef DEBUG
920 		if (pmapdebug & PDB_ENTER)
921 			printf("enter: pv at %x: %x/%x/%x ",
922 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
923 #endif
924 		/*
925 		 * No entries yet, use header as the first entry
926 		 */
927 		if (pv->pv_pmap == NULL) {
928 #ifdef DEBUG
929 			enter_stats.firstpv++;
930 #endif
931 			pv->pv_va = va;
932 			pv->pv_pmap = pmap;
933 			pv->pv_next = NULL;
934 			pv->pv_flags = 0;
935 		}
936 		/*
937 		 * There is at least one other VA mapping this page.
938 		 * Place this entry after the header.
939 		 */
940 		else {
941 			/*printf("second time: ");*/
942 #ifdef DEBUG
943 			for (npv = pv; npv; npv = npv->pv_next)
944 				if (pmap == npv->pv_pmap && va == npv->pv_va)
945 					panic("pmap_enter: already in pv_tab");
946 #endif
947 			npv = (pv_entry_t)
948 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
949 			npv->pv_va = va;
950 			npv->pv_pmap = pmap;
951 			npv->pv_next = pv->pv_next;
952 			pv->pv_next = npv;
953 #ifdef DEBUG
954 			if (!npv->pv_next)
955 				enter_stats.secondpv++;
956 #endif
957 		}
958 		splx(s);
959 	}
960 	/*
961 	 * Assumption: if it is not part of our managed memory
962 	 * then it must be device memory which may be volatile.
963 	 */
964 	if (pmap_initialized) {
965 		checkpv = cacheable = FALSE;
966 #ifdef DEBUG
967 		enter_stats.unmanaged++;
968 #endif
969 	}
970 
971 	/*
972 	 * Increment counters
973 	 */
974 	pmap->pm_stats.resident_count++;
975 	if (wired)
976 		pmap->pm_stats.wired_count++;
977 
978 validate:
979 	/*
980 	 * Now validate mapping with desired protection/wiring.
981 	 * Assume uniform modified and referenced status for all
982 	 * I386 pages in a MACH page.
983 	 */
984 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
985 	npte |= (*(int *)pte & (PG_M|PG_U));
986 	if (wired)
987 		npte |= PG_W;
988 	if(va < UPT_MIN_ADDRESS)
989 		npte |= PG_u;
990 	else if(va < UPT_MAX_ADDRESS)
991 		npte |= PG_u | PG_RW;
992 #ifdef DEBUG
993 	if (pmapdebug & PDB_ENTER)
994 		printf("enter: new pte value %x ", npte);
995 #endif
996 	ix = 0;
997 	do {
998 		*(int *)pte++ = npte;
999 		/*TBIS(va);*/
1000 		npte += I386_PAGE_SIZE;
1001 		va += I386_PAGE_SIZE;
1002 	} while (++ix != i386pagesperpage);
1003 	pte--;
1004 #ifdef DEBUGx
1005 cache, tlb flushes
1006 #endif
1007 /*pads(pmap);*/
1008 	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
1009 	tlbflush();
1010 }
1011 
1012 /*
1013  *      pmap_page_protect:
1014  *
1015  *      Lower the permission for all mappings to a given page.
1016  */
1017 void
1018 pmap_page_protect(phys, prot)
1019         vm_offset_t     phys;
1020         vm_prot_t       prot;
1021 {
1022         switch (prot) {
1023         case VM_PROT_READ:
1024         case VM_PROT_READ|VM_PROT_EXECUTE:
1025                 pmap_copy_on_write(phys);
1026                 break;
1027         case VM_PROT_ALL:
1028                 break;
1029         default:
1030                 pmap_remove_all(phys);
1031                 break;
1032         }
1033 }
1034 
1035 /*
1036  *	Routine:	pmap_change_wiring
1037  *	Function:	Change the wiring attribute for a map/virtual-address
1038  *			pair.
1039  *	In/out conditions:
1040  *			The mapping must already exist in the pmap.
1041  */
1042 void
1043 pmap_change_wiring(pmap, va, wired)
1044 	register pmap_t	pmap;
1045 	vm_offset_t	va;
1046 	boolean_t	wired;
1047 {
1048 	register pt_entry_t *pte;
1049 	register int ix;
1050 
1051 #ifdef DEBUG
1052 	if (pmapdebug & PDB_FOLLOW)
1053 		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
1054 #endif
1055 	if (pmap == NULL)
1056 		return;
1057 
1058 	pte = pmap_pte(pmap, va);
1059 #ifdef DEBUG
1060 	/*
1061 	 * Page table page is not allocated.
1062 	 * Should this ever happen?  Ignore it for now,
1063 	 * we don't want to force allocation of unnecessary PTE pages.
1064 	 */
1065 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
1066 		if (pmapdebug & PDB_PARANOIA)
1067 			pg("pmap_change_wiring: invalid PDE for %x ", va);
1068 		return;
1069 	}
1070 	/*
1071 	 * Page not valid.  Should this ever happen?
1072 	 * Just continue and change wiring anyway.
1073 	 */
1074 	if (!pmap_pte_v(pte)) {
1075 		if (pmapdebug & PDB_PARANOIA)
1076 			pg("pmap_change_wiring: invalid PTE for %x ", va);
1077 	}
1078 #endif
1079 	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
1080 		if (wired)
1081 			pmap->pm_stats.wired_count++;
1082 		else
1083 			pmap->pm_stats.wired_count--;
1084 	}
1085 	/*
1086 	 * Wiring is not a hardware characteristic so there is no need
1087 	 * to invalidate TLB.
1088 	 */
1089 	ix = 0;
1090 	do {
1091 		pmap_pte_set_w(pte++, wired);
1092 	} while (++ix != i386pagesperpage);
1093 }
1094 
1095 /*
1096  *	Routine:	pmap_pte
1097  *	Function:
1098  *		Extract the page table entry associated
1099  *		with the given map/virtual_address pair.
1100  * [ what about induced faults -wfj]
1101  */
1102 
1103 struct pte *pmap_pte(pmap, va)
1104 	register pmap_t	pmap;
1105 	vm_offset_t va;
1106 {
1107 
1108 #ifdef DEBUGx
1109 	if (pmapdebug & PDB_FOLLOW)
1110 		printf("pmap_pte(%x, %x) ->\n", pmap, va);
1111 #endif
1112 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1113 
1114 		/* are we current address space or kernel? */
1115 		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
1116 			|| pmap == kernel_pmap)
1117 			return ((struct pte *) vtopte(va));
1118 
1119 		/* otherwise, we are alternate address space */
1120 		else {
1121 			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
1122 				!= APTDpde.pd_pfnum) {
1123 				APTDpde = pmap->pm_pdir[PTDPTDI];
1124 				tlbflush();
1125 			}
1126 			return((struct pte *) avtopte(va));
1127 		}
1128 	}
1129 	return(0);
1130 }
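/*
 * For reference, avtopte() used above is the same linear-pte-array
 * computation as vtopte(), but based in the window selected by the
 * alternate recursive slot; the slot value below is an assumed example,
 * not taken from the headers.
 */
#ifdef notdef
#define EX_APTDPTDI     0x3fe           /* assumed alternate recursive pd slot */

static unsigned *
example_avtopte(unsigned va)
{
        return ((unsigned *)((unsigned)EX_APTDPTDI << 22) + (va >> 12));
}
#endif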
1131 
1132 /*
1133  *	Routine:	pmap_extract
1134  *	Function:
1135  *		Extract the physical page address associated
1136  *		with the given map/virtual_address pair.
1137  */
1138 
1139 vm_offset_t
1140 pmap_extract(pmap, va)
1141 	register pmap_t	pmap;
1142 	vm_offset_t va;
1143 {
1144 	register vm_offset_t pa;
1145 
1146 #ifdef DEBUGx
1147 	if (pmapdebug & PDB_FOLLOW)
1148 		pg("pmap_extract(%x, %x) -> ", pmap, va);
1149 #endif
1150 	pa = 0;
1151 	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1152 		pa = *(int *) pmap_pte(pmap, va);
1153 	}
1154 	if (pa)
1155 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1156 #ifdef DEBUGx
1157 	if (pmapdebug & PDB_FOLLOW)
1158 		printf("%x\n", pa);
1159 #endif
1160 	return(pa);
1161 }
1162 
1163 /*
1164  *	Copy the range specified by src_addr/len
1165  *	from the source map to the range dst_addr/len
1166  *	in the destination map.
1167  *
1168  *	This routine is only advisory and need not do anything.
1169  */
1170 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1171 	pmap_t		dst_pmap;
1172 	pmap_t		src_pmap;
1173 	vm_offset_t	dst_addr;
1174 	vm_size_t	len;
1175 	vm_offset_t	src_addr;
1176 {
1177 #ifdef DEBUG
1178 	if (pmapdebug & PDB_FOLLOW)
1179 		printf("pmap_copy(%x, %x, %x, %x, %x)",
1180 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1181 #endif
1182 }
1183 
1184 /*
1185  *	Require that all active physical maps contain no
1186  *	incorrect entries NOW.  [This update includes
1187  *	forcing updates of any address map caching.]
1188  *
1189  *	Generally used to ensure that a thread about
1190  *	to run will see a semantically correct world.
1191  */
1192 void pmap_update()
1193 {
1194 #ifdef DEBUG
1195 	if (pmapdebug & PDB_FOLLOW)
1196 		printf("pmap_update()");
1197 #endif
1198 	tlbflush();
1199 }
1200 
1201 /*
1202  *	Routine:	pmap_collect
1203  *	Function:
1204  *		Garbage collects the physical map system for
1205  *		pages which are no longer used.
1206  *		Success need not be guaranteed -- that is, there
1207  *		may well be pages which are not referenced, but
1208  *		others may be collected.
1209  *	Usage:
1210  *		Called by the pageout daemon when pages are scarce.
1211  * [ needs to be written -wfj ]
1212  */
1213 void
1214 pmap_collect(pmap)
1215 	pmap_t		pmap;
1216 {
1217 	register vm_offset_t pa;
1218 	register pv_entry_t pv;
1219 	register int *pte;
1220 	vm_offset_t kpa;
1221 	int s;
1222 
1223 #ifdef DEBUG
1224 	int *pde;
1225 	int opmapdebug;
1226 	printf("pmap_collect(%x) ", pmap);
1227 #endif
1228 	if (pmap != kernel_pmap)
1229 		return;
1230 
1231 }
1232 
1233 /* [ macro again?, should I force kstack into user map here? -wfj ] */
1234 void
1235 pmap_activate(pmap, pcbp)
1236 	register pmap_t pmap;
1237 	struct pcb *pcbp;
1238 {
1239 int x;
1240 #ifdef DEBUG
1241 	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
1242 		pg("pmap_activate(%x, %x) ", pmap, pcbp);
1243 #endif
1244 	PMAP_ACTIVATE(pmap, pcbp);
1245 /*printf("pde ");
1246 for(x=0x3f6; x < 0x3fA; x++)
1247 	printf("%x ", pmap->pm_pdir[x]);*/
1248 /*pads(pmap);*/
1249 /*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
1250 }
1251 
1252 /*
1253  *	pmap_zero_page zeros the specified (machine independent)
1254  *	page by mapping the page into virtual memory and using
1255  *	bzero to clear its contents, one machine dependent page
1256  *	at a time.
1257  */
1258 void
1259 pmap_zero_page(phys)
1260 	register vm_offset_t	phys;
1261 {
1262 	register int ix;
1263 
1264 #ifdef DEBUG
1265 	if (pmapdebug & PDB_FOLLOW)
1266 		printf("pmap_zero_page(%x)", phys);
1267 #endif
1268 	phys >>= PG_SHIFT;
1269 	ix = 0;
1270 	do {
1271 		clearseg(phys++);
1272 	} while (++ix != i386pagesperpage);
1273 }
1274 
1275 /*
1276  *	pmap_copy_page copies the specified (machine independent)
1277  *	page by mapping the page into virtual memory and using
1278  *	bcopy to copy the page, one machine dependent page at a
1279  *	time.
1280  */
1281 void
1282 pmap_copy_page(src, dst)
1283 	register vm_offset_t	src, dst;
1284 {
1285 	register int ix;
1286 
1287 #ifdef DEBUG
1288 	if (pmapdebug & PDB_FOLLOW)
1289 		printf("pmap_copy_page(%x, %x)", src, dst);
1290 #endif
1291 	src >>= PG_SHIFT;
1292 	dst >>= PG_SHIFT;
1293 	ix = 0;
1294 	do {
1295 		physcopyseg(src++, dst++);
1296 	} while (++ix != i386pagesperpage);
1297 }
1298 
1299 
1300 /*
1301  *	Routine:	pmap_pageable
1302  *	Function:
1303  *		Make the specified pages (by pmap, offset)
1304  *		pageable (or not) as requested.
1305  *
1306  *		A page which is not pageable may not take
1307  *		a fault; therefore, its page table entry
1308  *		must remain valid for the duration.
1309  *
1310  *		This routine is merely advisory; pmap_enter
1311  *		will specify that these pages are to be wired
1312  *		down (or not) as appropriate.
1313  */
1314 void
1315 pmap_pageable(pmap, sva, eva, pageable)
1316 	pmap_t		pmap;
1317 	vm_offset_t	sva, eva;
1318 	boolean_t	pageable;
1319 {
1320 #ifdef DEBUG
1321 	if (pmapdebug & PDB_FOLLOW)
1322 		printf("pmap_pageable(%x, %x, %x, %x)",
1323 		       pmap, sva, eva, pageable);
1324 #endif
1325 	/*
1326 	 * If we are making a PT page pageable then all valid
1327 	 * mappings must be gone from that page.  Hence it should
1328 	 * be all zeros and there is no need to clean it.
1329 	 * Assumptions:
1330 	 *	- we are called with only one page at a time
1331 	 *	- PT pages have only one pv_table entry
1332 	 */
1333 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1334 		register pv_entry_t pv;
1335 		register vm_offset_t pa;
1336 
1337 #ifdef DEBUG
1338 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1339 			printf("pmap_pageable(%x, %x, %x, %x)",
1340 			       pmap, sva, eva, pageable);
1341 #endif
1342 		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
1343 			return;*/
1344 		if(pmap_pte(pmap, sva) == 0)
1345 			return;
1346 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1347 		if (pa < vm_first_phys || pa >= vm_last_phys)
1348 			return;
1349 		pv = pa_to_pvh(pa);
1350 		/*if (!ispt(pv->pv_va))
1351 			return;*/
1352 #ifdef DEBUG
1353 		if (pv->pv_va != sva || pv->pv_next) {
1354 			pg("pmap_pageable: bad PT page va %x next %x\n",
1355 			       pv->pv_va, pv->pv_next);
1356 			return;
1357 		}
1358 #endif
1359 		/*
1360 		 * Mark it unmodified to avoid pageout
1361 		 */
1362 		pmap_clear_modify(pa);
1363 #ifdef needsomethinglikethis
1364 		if (pmapdebug & PDB_PTPAGE)
1365 			pg("pmap_pageable: PT page %x(%x) unmodified\n",
1366 			       sva, *(int *)pmap_pte(pmap, sva));
1367 		if (pmapdebug & PDB_WIRING)
1368 			pmap_check_wiring("pageable", sva);
1369 #endif
1370 	}
1371 }
1372 
1373 /*
1374  *	Clear the modify bits on the specified physical page.
1375  */
1376 
1377 void
1378 pmap_clear_modify(pa)
1379 	vm_offset_t	pa;
1380 {
1381 #ifdef DEBUG
1382 	if (pmapdebug & PDB_FOLLOW)
1383 		printf("pmap_clear_modify(%x)", pa);
1384 #endif
1385 	pmap_changebit(pa, PG_M, FALSE);
1386 }
1387 
1388 /*
1389  *	pmap_clear_reference:
1390  *
1391  *	Clear the reference bit on the specified physical page.
1392  */
1393 
1394 void pmap_clear_reference(pa)
1395 	vm_offset_t	pa;
1396 {
1397 #ifdef DEBUG
1398 	if (pmapdebug & PDB_FOLLOW)
1399 		printf("pmap_clear_reference(%x)", pa);
1400 #endif
1401 	pmap_changebit(pa, PG_U, FALSE);
1402 }
1403 
1404 /*
1405  *	pmap_is_referenced:
1406  *
1407  *	Return whether or not the specified physical page is referenced
1408  *	by any physical maps.
1409  */
1410 
1411 boolean_t
1412 pmap_is_referenced(pa)
1413 	vm_offset_t	pa;
1414 {
1415 #ifdef DEBUG
1416 	if (pmapdebug & PDB_FOLLOW) {
1417 		boolean_t rv = pmap_testbit(pa, PG_U);
1418 		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
1419 		return(rv);
1420 	}
1421 #endif
1422 	return(pmap_testbit(pa, PG_U));
1423 }
1424 
1425 /*
1426  *	pmap_is_modified:
1427  *
1428  *	Return whether or not the specified physical page is modified
1429  *	by any physical maps.
1430  */
1431 
1432 boolean_t
1433 pmap_is_modified(pa)
1434 	vm_offset_t	pa;
1435 {
1436 #ifdef DEBUG
1437 	if (pmapdebug & PDB_FOLLOW) {
1438 		boolean_t rv = pmap_testbit(pa, PG_M);
1439 		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
1440 		return(rv);
1441 	}
1442 #endif
1443 	return(pmap_testbit(pa, PG_M));
1444 }
1445 
1446 vm_offset_t
1447 pmap_phys_address(ppn)
1448 	int ppn;
1449 {
1450 	return(i386_ptob(ppn));
1451 }
1452 
1453 /*
1454  * Miscellaneous support routines follow
1455  */
1456 
1457 i386_protection_init()
1458 {
1459 	register int *kp, prot;
1460 
1461 	kp = protection_codes;
1462 	for (prot = 0; prot < 8; prot++) {
1463 		switch (prot) {
1464 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1465 			*kp++ = 0;
1466 			break;
1467 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1468 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1469 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1470 			*kp++ = PG_RO;
1471 			break;
1472 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1473 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1474 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1475 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1476 			*kp++ = PG_RW;
1477 			break;
1478 		}
1479 	}
1480 }
1481 
1482 static
1483 boolean_t
1484 pmap_testbit(pa, bit)
1485 	register vm_offset_t pa;
1486 	int bit;
1487 {
1488 	register pv_entry_t pv;
1489 	register int *pte, ix;
1490 	int s;
1491 
1492 	if (pa < vm_first_phys || pa >= vm_last_phys)
1493 		return(FALSE);
1494 
1495 	pv = pa_to_pvh(pa);
1496 	s = splimp();
1497 	/*
1498 	 * Check saved info first
1499 	 */
1500 	if (pmap_attributes[pa_index(pa)] & bit) {
1501 		splx(s);
1502 		return(TRUE);
1503 	}
1504 	/*
1505 	 * Not found, check current mappings returning
1506 	 * immediately if found.
1507 	 */
1508 	if (pv->pv_pmap != NULL) {
1509 		for (; pv; pv = pv->pv_next) {
1510 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1511 			ix = 0;
1512 			do {
1513 				if (*pte++ & bit) {
1514 					splx(s);
1515 					return(TRUE);
1516 				}
1517 			} while (++ix != i386pagesperpage);
1518 		}
1519 	}
1520 	splx(s);
1521 	return(FALSE);
1522 }
1523 
1524 pmap_changebit(pa, bit, setem)
1525 	register vm_offset_t pa;
1526 	int bit;
1527 	boolean_t setem;
1528 {
1529 	register pv_entry_t pv;
1530 	register int *pte, npte, ix;
1531 	vm_offset_t va;
1532 	int s;
1533 	boolean_t firstpage = TRUE;
1534 
1535 #ifdef DEBUG
1536 	if (pmapdebug & PDB_BITS)
1537 		printf("pmap_changebit(%x, %x, %s)",
1538 		       pa, bit, setem ? "set" : "clear");
1539 #endif
1540 	if (pa < vm_first_phys || pa >= vm_last_phys)
1541 		return;
1542 
1543 	pv = pa_to_pvh(pa);
1544 	s = splimp();
1545 	/*
1546 	 * Clear saved attributes (modify, reference)
1547 	 */
1548 	if (!setem)
1549 		pmap_attributes[pa_index(pa)] &= ~bit;
1550 	/*
1551 	 * Loop over all current mappings setting/clearing as appropriate.
1552 	 * If setting RO do we need to clear the VAC?
1553 	 */
1554 	if (pv->pv_pmap != NULL) {
1555 #ifdef DEBUG
1556 		int toflush = 0;
1557 #endif
1558 		for (; pv; pv = pv->pv_next) {
1559 #ifdef DEBUG
1560 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1561 #endif
1562 			va = pv->pv_va;
1563 
1564                         /*
1565                          * XXX don't write protect pager mappings
1566                          */
1567                         if (bit == PG_RO) {
1568                                 extern vm_offset_t pager_sva, pager_eva;
1569 
1570                                 if (va >= pager_sva && va < pager_eva)
1571                                         continue;
1572                         }
1573 
1574 			pte = (int *) pmap_pte(pv->pv_pmap, va);
1575 			ix = 0;
1576 			do {
1577 				if (setem)
1578 					npte = *pte | bit;
1579 				else
1580 					npte = *pte & ~bit;
1581 				if (*pte != npte) {
1582 					*pte = npte;
1583 					/*TBIS(va);*/
1584 				}
1585 				va += I386_PAGE_SIZE;
1586 				pte++;
1587 			} while (++ix != i386pagesperpage);
1588 
1589 			if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
1590 				pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
1591 		}
1592 #ifdef somethinglikethis
1593 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1594 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1595 				DCIA();
1596 			else if (toflush == 2)
1597 				DCIS();
1598 			else
1599 				DCIU();
1600 		}
1601 #endif
1602 	}
1603 	splx(s);
1604 }
1605 
1606 #ifdef DEBUG
1607 pmap_pvdump(pa)
1608 	vm_offset_t pa;
1609 {
1610 	register pv_entry_t pv;
1611 
1612 	printf("pa %x", pa);
1613 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1614 		printf(" -> pmap %x, va %x, flags %x",
1615 		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
1616 		pads(pv->pv_pmap);
1617 	}
1618 	printf(" ");
1619 }
1620 
1621 #ifdef notyet
1622 pmap_check_wiring(str, va)
1623 	char *str;
1624 	vm_offset_t va;
1625 {
1626 	vm_map_entry_t entry;
1627 	register int count, *pte;
1628 
1629 	va = trunc_page(va);
1630 	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
1631 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
1632 		return;
1633 
1634 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
1635 		pg("wired_check: entry for %x not found\n", va);
1636 		return;
1637 	}
1638 	count = 0;
1639 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
1640 		if (*pte)
1641 			count++;
1642 	if (entry->wired_count != count)
1643 		pg("*%s*: %x: w%d/a%d\n",
1644 		       str, va, entry->wired_count, count);
1645 }
1646 #endif
1647 
1648 /* print address space of pmap*/
1649 pads(pm) pmap_t pm; {
1650 	unsigned va, i, j;
1651 	struct pte *ptep;
1652 
1653 	if(pm == kernel_pmap) return;
1654 	for (i = 0; i < 1024; i++)
1655 		if(pm->pm_pdir[i].pd_v)
1656 			for (j = 0; j < 1024 ; j++) {
1657 				va = (i<<22)+(j<<12);
1658 				if (pm == kernel_pmap && va < 0xfe000000)
1659 						continue;
1660 				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
1661 						continue;
1662 				ptep = pmap_pte(pm, va);
1663 				if(pmap_pte_v(ptep))
1664 					printf("%x:%x ", va, *(int *)ptep);
1665 			} ;
1666 
1667 }
1668 #endif
1669