1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	7.8 (Berkeley) 02/19/92
12  */
13 
14 /*
15  *	HP9000/300 series physical map management code.
16  *	For 68020/68030 machines with HP, 68851, or 68030 MMUs
17  *		(models 320,350,318,319,330,340,360,370,345,375)
18  *	Don't even pay lip service to multiprocessor support.
19  *
20  *	XXX will only work for PAGE_SIZE == NBPG (hppagesperpage == 1)
21  *	right now because of the assumed one-to-one relationship of PT
22  *	pages to STEs.
23  */
24 
25 /*
26  *	Manages physical address maps.
27  *
28  *	In addition to hardware address maps, this
29  *	module is called upon to provide software-use-only
30  *	maps which may or may not be stored in the same
31  *	form as hardware maps.  These pseudo-maps are
32  *	used to store intermediate results from copy
33  *	operations to and from address spaces.
34  *
35  *	Since the information managed by this module is
36  *	also stored by the logical address mapping module,
37  *	this module may throw away valid virtual-to-physical
38  *	mappings at almost any time.  However, invalidations
39  *	of virtual-to-physical mappings must be done as
40  *	requested.
41  *
42  *	In order to cope with hardware architectures which
43  *	make virtual-to-physical map invalidates expensive,
44  *	this module may delay invalidation or protection-reduction
45  *	operations until such time as they are actually
46  *	necessary.  This module is given full information as
47  *	to which processors are currently using which maps,
48  *	and as to when physical maps must be made correct.
49  */
50 
51 #include "param.h"
52 #include "proc.h"
53 #include "malloc.h"
54 #include "user.h"
55 
56 #include "pte.h"
57 
58 #include "vm/vm.h"
59 #include "vm/vm_kern.h"
60 #include "vm/vm_page.h"
61 
62 #include "../include/cpu.h"
63 
64 /*
65  * Allocate various and sundry SYSMAPs used in the days of old VM
66  * and not yet converted.  XXX.
67  */
68 #define BSDVM_COMPAT	1
69 
70 #ifdef DEBUG
71 struct {
72 	int collectscans;
73 	int collectpages;
74 	int kpttotal;
75 	int kptinuse;
76 	int kptmaxuse;
77 } kpt_stats;
78 struct {
79 	int kernel;	/* entering kernel mapping */
80 	int user;	/* entering user mapping */
81 	int ptpneeded;	/* needed to allocate a PT page */
82 	int pwchange;	/* no mapping change, just wiring or protection */
83 	int wchange;	/* no mapping change, just wiring */
84 	int mchange;	/* was mapped but mapping to different page */
85 	int managed;	/* a managed page */
86 	int firstpv;	/* first mapping for this PA */
87 	int secondpv;	/* second mapping for this PA */
88 	int ci;		/* cache inhibited */
89 	int unmanaged;	/* not a managed page */
90 	int flushes;	/* cache flushes */
91 } enter_stats;
92 struct {
93 	int calls;
94 	int removes;
95 	int pvfirst;
96 	int pvsearch;
97 	int ptinvalid;
98 	int uflushes;
99 	int sflushes;
100 } remove_stats;
101 
102 int debugmap = 0;
103 int pmapdebug = 0x2000;
104 #define PDB_FOLLOW	0x0001
105 #define PDB_INIT	0x0002
106 #define PDB_ENTER	0x0004
107 #define PDB_REMOVE	0x0008
108 #define PDB_CREATE	0x0010
109 #define PDB_PTPAGE	0x0020
110 #define PDB_CACHE	0x0040
111 #define PDB_BITS	0x0080
112 #define PDB_COLLECT	0x0100
113 #define PDB_PROTECT	0x0200
114 #define PDB_SEGTAB	0x0400
115 #define PDB_PARANOIA	0x2000
116 #define PDB_WIRING	0x4000
117 #define PDB_PVDUMP	0x8000
118 
119 int pmapvacflush = 0;
120 #define	PVF_ENTER	0x01
121 #define	PVF_REMOVE	0x02
122 #define	PVF_PROTECT	0x04
123 #define	PVF_TOTAL	0x80
124 
125 extern vm_offset_t pager_sva, pager_eva;
126 #endif
127 
128 /*
129  * Get STEs and PTEs for user/kernel address space
130  */
131 #define	pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
132 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
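/*
 * Both macros simply index the per-pmap tables by virtual address:
 * pmap_ste() selects the STE in pm_stab from the segment-index bits
 * (VA >> SG_ISHIFT) and pmap_pte() selects the PTE in pm_ptab from the
 * virtual page number (VA >> PG_SHIFT).  Neither checks validity;
 * callers are expected to test pmap_ste_v() before touching the PTE.
 */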
133 
134 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
135 
136 #define pmap_ste_v(pte)		((pte)->sg_v)
137 #define pmap_pte_w(pte)		((pte)->pg_w)
138 #define pmap_pte_ci(pte)	((pte)->pg_ci)
139 #define pmap_pte_m(pte)		((pte)->pg_m)
140 #define pmap_pte_u(pte)		((pte)->pg_u)
141 #define pmap_pte_v(pte)		((pte)->pg_v)
142 #define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
143 #define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
144 
145 /*
146  * Given a map and a machine independent protection code,
147  *	convert to an hp300 protection code.
148  */
149 #define pte_prot(m, p)	(protection_codes[p])
150 int	protection_codes[8];
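/*
 * protection_codes[] is indexed directly by the VM_PROT_{READ,WRITE,EXECUTE}
 * bit combination (eight possibilities) and is filled in with 0, PG_RO or
 * PG_RW by hp300_protection_init() below.
 */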
151 
152 /*
153  * Kernel page table page management.
154  */
155 struct kpt_page {
156 	struct kpt_page *kpt_next;	/* link on either used or free list */
157 	vm_offset_t	kpt_va;		/* always valid kernel VA */
158 	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
159 };
160 struct kpt_page *kpt_free_list, *kpt_used_list;
161 struct kpt_page *kpt_pages;
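/*
 * kpt_pages is the array of management structures set up in pmap_init();
 * entries migrate between kpt_free_list and kpt_used_list (linked through
 * kpt_next) as kernel PT pages are handed out by pmap_enter_ptpage() and
 * reclaimed by pmap_collect().
 */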
162 
163 /*
164  * Kernel segment/page table and page table map.
165  * The page table map gives us a level of indirection we need to dynamically
166  * expand the page table.  It is essentially a copy of the segment table
167  * with PTEs instead of STEs.  All are initialized in locore at boot time.
168  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
169  * Segtabzero is an empty segment table which all processes share until they
170  * reference something.
171  */
172 st_entry_t	*Sysseg;
173 pt_entry_t	*Sysmap, *Sysptmap;
174 st_entry_t	*Segtabzero;
175 #if BSDVM_COMPAT
176 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
177 #else
178 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
179 #endif
180 
181 struct pmap	kernel_pmap_store;
182 vm_map_t	pt_map;
183 
184 vm_offset_t    	avail_start;	/* PA of first available physical page */
185 vm_offset_t	avail_end;	/* PA of last available physical page */
186 vm_size_t	mem_size;	/* memory size in bytes */
187 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
188 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
189 vm_offset_t	vm_first_phys;	/* PA of first managed page */
190 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
191 int		hppagesperpage;	/* PAGE_SIZE / HP_PAGE_SIZE */
192 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
193 int		pmap_aliasmask;	/* separation at which VA aliasing ok */
194 char		*pmap_attributes;	/* reference and modify bits */
195 
196 boolean_t	pmap_testbit();
197 void		pmap_enter_ptpage();
198 
199 #if BSDVM_COMPAT
200 #include "msgbuf.h"
201 
202 /*
203  * All those kernel PT submaps that BSD is so fond of
204  */
205 struct pte	*CMAP1, *CMAP2, *mmap;
206 caddr_t		CADDR1, CADDR2, vmmap;
207 struct pte	*msgbufmap;
208 struct msgbuf	*msgbufp;
209 #endif
210 
211 /*
212  *	Bootstrap the system enough to run with virtual memory.
213  *	Map the kernel's code and data, and allocate the system page table.
214  *
215  *	On the HP this is called after mapping has already been enabled
216  *	and just syncs the pmap module with what has already been done.
217  *	[We can't call it easily with mapping off since the kernel is not
218  *	mapped with PA == VA, hence we would have to relocate every address
219  *	from the linked base (virtual) address 0 to the actual (physical)
220  *	address of 0xFFxxxxxx.]
221  */
222 void
223 pmap_bootstrap(firstaddr, loadaddr)
224 	vm_offset_t firstaddr;
225 	vm_offset_t loadaddr;
226 {
227 #if BSDVM_COMPAT
228 	vm_offset_t va;
229 	struct pte *pte;
230 #endif
231 	extern vm_offset_t maxmem, physmem;
232 
233 	avail_start = firstaddr;
234 	avail_end = maxmem << PGSHIFT;
235 
236 	/* XXX: allow for msgbuf */
237 	avail_end -= hp300_round_page(sizeof(struct msgbuf));
238 
239 	mem_size = physmem << PGSHIFT;
240 	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
241 	virtual_end = VM_MAX_KERNEL_ADDRESS;
242 	hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;
243 
244 	/*
245 	 * Determine VA aliasing distance if any
246 	 */
247 	if (ectype == EC_VIRT)
248 		switch (machineid) {
249 		case HP_320:
250 			pmap_aliasmask = 0x3fff;	/* 16k */
251 			break;
252 		case HP_350:
253 			pmap_aliasmask = 0x7fff;	/* 32k */
254 			break;
255 		}
256 
257 	/*
258 	 * Initialize protection array.
259 	 */
260 	hp300_protection_init();
261 
262 	/*
263 	 * Kernel page/segment table allocated in locore,
264 	 * just initialize pointers.
265 	 */
266 	kernel_pmap->pm_stab = Sysseg;
267 	kernel_pmap->pm_ptab = Sysmap;
268 
269 	simple_lock_init(&kernel_pmap->pm_lock);
270 	kernel_pmap->pm_count = 1;
271 
272 #if BSDVM_COMPAT
273 	/*
274 	 * Allocate all the submaps we need
275 	 */
276 #define	SYSMAP(c, p, v, n)	\
277 	v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);
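	/*
	 * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands (roughly) to
	 *	CADDR1 = (caddr_t)va; va += HP_PAGE_SIZE; CMAP1 = pte; pte += 1;
	 * i.e. it reserves one page of kernel VA for CADDR1 and remembers the
	 * PTE that maps it in CMAP1.
	 */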
278 
279 	va = virtual_avail;
280 	pte = pmap_pte(kernel_pmap, va);
281 
282 	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
283 	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
284 	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
285 	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
286 	virtual_avail = va;
287 #endif
288 }
289 
290 /*
291  * Bootstrap memory allocator. This function allows for early dynamic
292  * memory allocation until the virtual memory system has been bootstrapped.
293  * After that point, either kmem_alloc or malloc should be used. This
294  * function works by stealing pages from the (to be) managed page pool,
295  * stealing virtual address space, then mapping the pages and zeroing them.
296  *
297  * It should only be used from pmap_bootstrap until vm_page_startup; after
298  * that it cannot be used and will generate a panic if tried. Note that this
299  * memory will never be freed, and in essence it is wired down.
300  */
301 void *
302 pmap_bootstrap_alloc(size) {
303 	vm_offset_t val;
304 	int i;
305 	extern boolean_t vm_page_startup_initialized;
306 
307 	if (vm_page_startup_initialized)
308 		panic("pmap_bootstrap_alloc: called after startup initialized");
309 	size = round_page(size);
310 	val = virtual_avail;
311 
312 	virtual_avail = pmap_map(virtual_avail, avail_start,
313 		avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
314 	avail_start += size;
315 
316 	blkclr ((caddr_t) val, size);
317 	return ((void *) val);
318 }
319 
320 /*
321  *	Initialize the pmap module.
322  *	Called by vm_init, to initialize any structures that the pmap
323  *	system needs to map virtual memory.
324  */
325 void
326 pmap_init(phys_start, phys_end)
327 	vm_offset_t	phys_start, phys_end;
328 {
329 	vm_offset_t	addr, addr2;
330 	vm_size_t	npg, s;
331 	int		rv;
332 	extern char kstack[];
333 
334 #ifdef DEBUG
335 	if (pmapdebug & PDB_FOLLOW)
336 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
337 #endif
338 	/*
339 	 * Now that kernel map has been allocated, we can mark as
340 	 * unavailable regions which we have mapped in locore.
341 	 */
342 	addr = (vm_offset_t) intiobase;
343 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
344 			   &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
345 	if (addr != (vm_offset_t)intiobase)
346 		goto bogons;
347 	addr = (vm_offset_t) Sysmap;
348 	vm_object_reference(kernel_object);
349 	(void) vm_map_find(kernel_map, kernel_object, addr,
350 			   &addr, HP_MAX_PTSIZE, FALSE);
351 	/*
352 	 * If this fails it is probably because the static portion of
353 	 * the kernel page table isn't big enough and we overran the
354 	 * page table map.   Need to adjust pmap_size() in hp300_init.c.
355 	 */
356 	if (addr != (vm_offset_t)Sysmap)
357 		goto bogons;
358 
359 	addr = (vm_offset_t) kstack;
360 	vm_object_reference(kernel_object);
361 	(void) vm_map_find(kernel_map, kernel_object, addr,
362 			   &addr, hp300_ptob(UPAGES), FALSE);
363 	if (addr != (vm_offset_t)kstack)
364 bogons:
365 		panic("pmap_init: bogons in the VM system!\n");
366 
367 #ifdef DEBUG
368 	if (pmapdebug & PDB_INIT) {
369 		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
370 		       Sysseg, Sysmap, Sysptmap);
371 		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
372 		       avail_start, avail_end, virtual_avail, virtual_end);
373 	}
374 #endif
375 
376 	/*
377 	 * Allocate memory for random pmap data structures.  Includes the
378 	 * initial segment table, pv_head_table and pmap_attributes.
379 	 */
380 	npg = atop(phys_end - phys_start);
381 	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
382 	s = round_page(s);
383 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
384 	Segtabzero = (st_entry_t *) addr;
385 	addr += HP_STSIZE;
386 	pv_table = (pv_entry_t) addr;
387 	addr += sizeof(struct pv_entry) * npg;
388 	pmap_attributes = (char *) addr;
389 #ifdef DEBUG
390 	if (pmapdebug & PDB_INIT)
391 		printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
392 		       s, npg, Segtabzero, pv_table, pmap_attributes);
393 #endif
394 
395 	/*
396 	 * Allocate physical memory for kernel PT pages and their management.
397 	 * We need 1 PT page per possible task plus some slop.
398 	 */
399 	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
400 	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
401 
402 	/*
403 	 * Verify that space will be allocated in region for which
404 	 * we already have kernel PT pages.
405 	 */
406 	addr = 0;
407 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
408 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
409 		panic("pmap_init: kernel PT too small");
410 	vm_map_remove(kernel_map, addr, addr + s);
411 
412 	/*
413 	 * Now allocate the space and link the pages together to
414 	 * form the KPT free list.
415 	 */
416 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
417 	s = ptoa(npg);
418 	addr2 = addr + s;
419 	kpt_pages = &((struct kpt_page *)addr2)[npg];
420 	kpt_free_list = (struct kpt_page *) 0;
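	/*
	 * The npg physical pages occupy [addr, addr2) and their kpt_page
	 * structures immediately follow at addr2.  The loop below walks the
	 * region backwards, pushing each page onto the free list and
	 * recording both its kernel VA and (for speed) its PA.
	 */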
421 	do {
422 		addr2 -= HP_PAGE_SIZE;
423 		(--kpt_pages)->kpt_next = kpt_free_list;
424 		kpt_free_list = kpt_pages;
425 		kpt_pages->kpt_va = addr2;
426 		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
427 	} while (addr != addr2);
428 #ifdef DEBUG
429 	kpt_stats.kpttotal = atop(s);
430 	if (pmapdebug & PDB_INIT)
431 		printf("pmap_init: KPT: %d pages from %x to %x\n",
432 		       atop(s), addr, addr + s);
433 #endif
434 
435 	/*
436 	 * Slightly modified version of kmem_suballoc() to get page table
437 	 * map where we want it.
438 	 */
439 	addr = HP_PTBASE;
440 	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
441 	addr2 = addr + s;
442 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
443 	if (rv != KERN_SUCCESS)
444 		panic("pmap_init: cannot allocate space for PT map");
445 	pmap_reference(vm_map_pmap(kernel_map));
446 	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
447 	if (pt_map == NULL)
448 		panic("pmap_init: cannot create pt_map");
449 	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
450 	if (rv != KERN_SUCCESS)
451 		panic("pmap_init: cannot map range to pt_map");
452 #ifdef DEBUG
453 	if (pmapdebug & PDB_INIT)
454 		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
455 #endif
456 
457 	/*
458 	 * Now it is safe to enable pv_table recording.
459 	 */
460 	vm_first_phys = phys_start;
461 	vm_last_phys = phys_end;
462 	pmap_initialized = TRUE;
463 }
464 
465 /*
466  *	Used to map a range of physical addresses into kernel
467  *	virtual address space.
468  *
469  *	For now, VM is already on, we only need to map the
470  *	specified memory.
471  */
472 vm_offset_t
473 pmap_map(virt, start, end, prot)
474 	vm_offset_t	virt;
475 	vm_offset_t	start;
476 	vm_offset_t	end;
477 	int		prot;
478 {
479 #ifdef DEBUG
480 	if (pmapdebug & PDB_FOLLOW)
481 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
482 #endif
483 	while (start < end) {
484 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
485 		virt += PAGE_SIZE;
486 		start += PAGE_SIZE;
487 	}
488 	return(virt);
489 }
490 
491 /*
492  *	Create and return a physical map.
493  *
494  *	If the size specified for the map
495  *	is zero, the map is an actual physical
496  *	map, and may be referenced by the
497  *	hardware.
498  *
499  *	If the size specified is non-zero,
500  *	the map will be used in software only, and
501  *	is bounded by that size.
502  */
503 pmap_t
504 pmap_create(size)
505 	vm_size_t	size;
506 {
507 	register pmap_t pmap;
508 
509 #ifdef DEBUG
510 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
511 		printf("pmap_create(%x)\n", size);
512 #endif
513 	/*
514 	 * Software use map does not need a pmap
515 	 */
516 	if (size)
517 		return(NULL);
518 
519 	/* XXX: is it ok to wait here? */
520 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
521 #ifdef notifwewait
522 	if (pmap == NULL)
523 		panic("pmap_create: cannot allocate a pmap");
524 #endif
525 	bzero(pmap, sizeof(*pmap));
526 	pmap_pinit(pmap);
527 	return (pmap);
528 }
529 
530 /*
531  * Initialize a preallocated and zeroed pmap structure,
532  * such as one in a vmspace structure.
533  */
534 void
535 pmap_pinit(pmap)
536 	register struct pmap *pmap;
537 {
538 
539 #ifdef DEBUG
540 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
541 		printf("pmap_pinit(%x)\n", pmap);
542 #endif
543 	/*
544 	 * No need to allocate page table space yet but we do need a
545 	 * valid segment table.  Initially, we point everyone at the
546 	 * "null" segment table.  On the first pmap_enter, a real
547 	 * segment table will be allocated.
548 	 */
549 	pmap->pm_stab = Segtabzero;
550 	pmap->pm_stchanged = TRUE;
551 	pmap->pm_count = 1;
552 	simple_lock_init(&pmap->pm_lock);
553 }
554 
555 /*
556  *	Retire the given physical map from service.
557  *	Should only be called if the map contains
558  *	no valid mappings.
559  */
560 void
561 pmap_destroy(pmap)
562 	register pmap_t pmap;
563 {
564 	int count;
565 
566 #ifdef DEBUG
567 	if (pmapdebug & PDB_FOLLOW)
568 		printf("pmap_destroy(%x)\n", pmap);
569 #endif
570 	if (pmap == NULL)
571 		return;
572 
573 	simple_lock(&pmap->pm_lock);
574 	count = --pmap->pm_count;
575 	simple_unlock(&pmap->pm_lock);
576 	if (count == 0) {
577 		pmap_release(pmap);
578 		free((caddr_t)pmap, M_VMPMAP);
579 	}
580 }
581 
582 /*
583  * Release any resources held by the given physical map.
584  * Called when a pmap initialized by pmap_pinit is being released.
585  * Should only be called if the map contains no valid mappings.
586  */
587 void
588 pmap_release(pmap)
589 	register struct pmap *pmap;
590 {
591 
592 #ifdef DEBUG
593 	if (pmapdebug & PDB_FOLLOW)
594 		printf("pmap_release(%x)\n", pmap);
595 #endif
596 #ifdef notdef /* DIAGNOSTIC */
597 	/* count would be 0 from pmap_destroy... */
598 	simple_lock(&pmap->pm_lock);
599 	if (pmap->pm_count != 1)
600 		panic("pmap_release count");
601 #endif
602 	if (pmap->pm_ptab)
603 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
604 				 HP_MAX_PTSIZE);
605 	if (pmap->pm_stab != Segtabzero)
606 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
607 }
608 
609 /*
610  *	Add a reference to the specified pmap.
611  */
612 void
613 pmap_reference(pmap)
614 	pmap_t	pmap;
615 {
616 #ifdef DEBUG
617 	if (pmapdebug & PDB_FOLLOW)
618 		printf("pmap_reference(%x)\n", pmap);
619 #endif
620 	if (pmap != NULL) {
621 		simple_lock(&pmap->pm_lock);
622 		pmap->pm_count++;
623 		simple_unlock(&pmap->pm_lock);
624 	}
625 }
626 
627 /*
628  *	Remove the given range of addresses from the specified map.
629  *
630  *	It is assumed that the start and end are properly
631  *	rounded to the page size.
632  */
633 void
634 pmap_remove(pmap, sva, eva)
635 	register pmap_t pmap;
636 	vm_offset_t sva, eva;
637 {
638 	register vm_offset_t pa, va;
639 	register pt_entry_t *pte;
640 	register pv_entry_t pv, npv;
641 	register int ix;
642 	pmap_t ptpmap;
643 	int *ste, s, bits;
644 	boolean_t firstpage = TRUE;
645 	boolean_t flushcache = FALSE;
646 #ifdef DEBUG
647 	pt_entry_t opte;
648 
649 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
650 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
651 #endif
652 
653 	if (pmap == NULL)
654 		return;
655 
656 #ifdef DEBUG
657 	remove_stats.calls++;
658 #endif
659 	for (va = sva; va < eva; va += PAGE_SIZE) {
660 		/*
661 		 * Weed out invalid mappings.
662 		 * Note: we assume that the segment table is always allocated.
663 		 */
664 		if (!pmap_ste_v(pmap_ste(pmap, va))) {
665 			/* XXX: avoid address wrap around */
666 			if (va >= hp300_trunc_seg((vm_offset_t)-1))
667 				break;
668 			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
669 			continue;
670 		}
671 		pte = pmap_pte(pmap, va);
672 		pa = pmap_pte_pa(pte);
673 		if (pa == 0)
674 			continue;
675 		/*
676 		 * Invalidating a non-CI page, must flush external VAC
677 		 * unless it is a supervisor mapping and we have already
678 		 * flushed the supervisor side.
679 		 */
680 		if (pmap_aliasmask && !pmap_pte_ci(pte) &&
681 		    !(pmap == kernel_pmap && firstpage))
682 			flushcache = TRUE;
683 #ifdef DEBUG
684 		opte = *pte;
685 		remove_stats.removes++;
686 #endif
687 		/*
688 		 * Update statistics
689 		 */
690 		if (pmap_pte_w(pte))
691 			pmap->pm_stats.wired_count--;
692 		pmap->pm_stats.resident_count--;
693 
694 		/*
695 		 * Invalidate the PTEs.
696 		 * XXX: should cluster them up and invalidate as many
697 		 * as possible at once.
698 		 */
699 #ifdef DEBUG
700 		if (pmapdebug & PDB_REMOVE)
701 			printf("remove: invalidating %x ptes at %x\n",
702 			       hppagesperpage, pte);
703 #endif
704 		/*
705 		 * Flush VAC to ensure we get the correct state of any
706 		 * hardware maintained bits.
707 		 */
708 		if (firstpage && pmap_aliasmask) {
709 			firstpage = FALSE;
710 			if (pmap == kernel_pmap)
711 				flushcache = FALSE;
712 			DCIS();
713 #ifdef DEBUG
714 			remove_stats.sflushes++;
715 #endif
716 		}
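		/*
		 * Invalidate each HP page making up this VM page, gathering
		 * any hardware reference/modify bits so they can be folded
		 * into pmap_attributes[] below (for managed pages).
		 */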
717 		bits = ix = 0;
718 		do {
719 			bits |= *(int *)pte & (PG_U|PG_M);
720 			*(int *)pte++ = PG_NV;
721 			TBIS(va + ix * HP_PAGE_SIZE);
722 		} while (++ix != hppagesperpage);
723 
724 		/*
725 		 * For user mappings decrement the wiring count on
726 		 * the PT page.  We do this after the PTE has been
727 		 * invalidated because vm_map_pageable winds up in
728 		 * pmap_pageable which clears the modify bit for the
729 		 * PT page.
730 		 */
731 		if (pmap != kernel_pmap) {
732 			pte = pmap_pte(pmap, va);
733 			vm_map_pageable(pt_map, trunc_page(pte),
734 					round_page(pte+1), TRUE);
735 #ifdef DEBUG
736 			if (pmapdebug & PDB_WIRING)
737 				pmap_check_wiring("remove", trunc_page(pte));
738 #endif
739 		}
740 		/*
741 		 * Remove from the PV table (raise IPL since we
742 		 * may be called at interrupt time).
743 		 */
744 		if (pa < vm_first_phys || pa >= vm_last_phys)
745 			continue;
746 		pv = pa_to_pvh(pa);
747 		ste = (int *)0;
748 		s = splimp();
749 		/*
750 		 * If it is the first entry on the list, it is actually
751 		 * in the header and we must copy the following entry up
752 		 * to the header.  Otherwise we must search the list for
753 		 * the entry.  In either case we free the now unused entry.
754 		 */
755 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
756 			ste = (int *)pv->pv_ptste;
757 			ptpmap = pv->pv_ptpmap;
758 			npv = pv->pv_next;
759 			if (npv) {
760 				*pv = *npv;
761 				free((caddr_t)npv, M_VMPVENT);
762 			} else
763 				pv->pv_pmap = NULL;
764 #ifdef DEBUG
765 			remove_stats.pvfirst++;
766 #endif
767 		} else {
768 			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
769 #ifdef DEBUG
770 				remove_stats.pvsearch++;
771 #endif
772 				if (pmap == npv->pv_pmap && va == npv->pv_va)
773 					break;
774 				pv = npv;
775 			}
776 #ifdef DEBUG
777 			if (npv == NULL)
778 				panic("pmap_remove: PA not in pv_tab");
779 #endif
780 			ste = (int *)npv->pv_ptste;
781 			ptpmap = npv->pv_ptpmap;
782 			pv->pv_next = npv->pv_next;
783 			free((caddr_t)npv, M_VMPVENT);
784 			pv = pa_to_pvh(pa);
785 		}
786 		/*
787 		 * If only one mapping left we no longer need to cache inhibit
788 		 */
789 		if (pv->pv_pmap &&
790 		    pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
791 #ifdef DEBUG
792 			if (pmapdebug & PDB_CACHE)
793 				printf("remove: clearing CI for pa %x\n", pa);
794 #endif
795 			pv->pv_flags &= ~PV_CI;
796 			pmap_changebit(pa, PG_CI, FALSE);
797 #ifdef DEBUG
798 			if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
799 			    (PDB_CACHE|PDB_PVDUMP))
800 				pmap_pvdump(pa);
801 #endif
802 		}
803 
804 		/*
805 		 * If this was a PT page we must also remove the
806 		 * mapping from the associated segment table.
807 		 */
808 		if (ste) {
809 #ifdef DEBUG
810 			remove_stats.ptinvalid++;
811 			if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
812 				printf("remove: ste was %x@%x pte was %x@%x\n",
813 				       *ste, ste,
814 				       *(int *)&opte, pmap_pte(pmap, va));
815 			}
816 #endif
817 			*ste = SG_NV;
818 			/*
819 			 * If it was a user PT page, we decrement the
820 			 * reference count on the segment table as well,
821 			 * freeing it if it is now empty.
822 			 */
823 			if (ptpmap != kernel_pmap) {
824 #ifdef DEBUG
825 				if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
826 					printf("remove: stab %x, refcnt %d\n",
827 					       ptpmap->pm_stab,
828 					       ptpmap->pm_sref - 1);
829 				if ((pmapdebug & PDB_PARANOIA) &&
830 				    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
831 					panic("remove: bogus ste");
832 #endif
833 				if (--(ptpmap->pm_sref) == 0) {
834 #ifdef DEBUG
835 					if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
836 					printf("remove: free stab %x\n",
837 					       ptpmap->pm_stab);
838 #endif
839 					kmem_free(kernel_map,
840 						  (vm_offset_t)ptpmap->pm_stab,
841 						  HP_STSIZE);
842 					ptpmap->pm_stab = Segtabzero;
843 					ptpmap->pm_stchanged = TRUE;
844 					/*
845 					 * XXX may have changed segment table
846 					 * pointer for current process so
847 					 * update now to reload hardware.
848 					 */
849 					if (ptpmap == curproc->p_vmspace->vm_map.pmap)
850 						PMAP_ACTIVATE(ptpmap,
851 							(struct pcb *)curproc->p_addr, 1);
852 				}
853 			}
854 			if (ptpmap == kernel_pmap)
855 				TBIAS();
856 			else
857 				TBIAU();
858 			pv->pv_flags &= ~PV_PTPAGE;
859 			ptpmap->pm_ptpages--;
860 		}
861 		/*
862 		 * Update saved attributes for managed page
863 		 */
864 		pmap_attributes[pa_index(pa)] |= bits;
865 		splx(s);
866 	}
867 #ifdef DEBUG
868 	if (pmapvacflush & PVF_REMOVE) {
869 		if (pmapvacflush & PVF_TOTAL)
870 			DCIA();
871 		else if (pmap == kernel_pmap)
872 			DCIS();
873 		else
874 			DCIU();
875 	}
876 #endif
877 	if (flushcache) {
878 		if (pmap == kernel_pmap) {
879 			DCIS();
880 #ifdef DEBUG
881 			remove_stats.sflushes++;
882 #endif
883 		} else {
884 			DCIU();
885 #ifdef DEBUG
886 			remove_stats.uflushes++;
887 #endif
888 		}
889 	}
890 }
891 
892 /*
893  *	pmap_page_protect:
894  *
895  *	Lower the permission for all mappings to a given page.
896  */
897 void
898 pmap_page_protect(pa, prot)
899 	vm_offset_t	pa;
900 	vm_prot_t	prot;
901 {
902 	register pv_entry_t pv;
903 	int s;
904 
905 #ifdef DEBUG
906 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
907 	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
908 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
909 #endif
910 	if (pa < vm_first_phys || pa >= vm_last_phys)
911 		return;
912 
913 	switch (prot) {
914 	case VM_PROT_ALL:
915 		break;
916 	/* copy_on_write */
917 	case VM_PROT_READ:
918 	case VM_PROT_READ|VM_PROT_EXECUTE:
919 		pmap_changebit(pa, PG_RO, TRUE);
920 		break;
921 	/* remove_all */
922 	default:
923 		pv = pa_to_pvh(pa);
924 		s = splimp();
925 		while (pv->pv_pmap != NULL) {
926 #ifdef DEBUG
927 			if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
928 			    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
929 				panic("pmap_page_protect: bad mapping");
930 #endif
931 			pmap_remove(pv->pv_pmap, pv->pv_va,
932 				    pv->pv_va + PAGE_SIZE);
933 		}
934 		splx(s);
935 		break;
936 	}
937 }
938 
939 /*
940  *	Set the physical protection on the
941  *	specified range of this map as requested.
942  */
943 void
944 pmap_protect(pmap, sva, eva, prot)
945 	register pmap_t	pmap;
946 	vm_offset_t	sva, eva;
947 	vm_prot_t	prot;
948 {
949 	register pt_entry_t *pte;
950 	register vm_offset_t va;
951 	register int ix;
952 	int hpprot;
953 	boolean_t firstpage = TRUE;
954 
955 #ifdef DEBUG
956 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
957 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
958 #endif
959 	if (pmap == NULL)
960 		return;
961 
962 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
963 		pmap_remove(pmap, sva, eva);
964 		return;
965 	}
966 	if (prot & VM_PROT_WRITE)
967 		return;
968 
969 	pte = pmap_pte(pmap, sva);
970 	hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
971 	for (va = sva; va < eva; va += PAGE_SIZE) {
972 		/*
973 		 * Page table page is not allocated.
974 		 * Skip it, we don't want to force allocation
975 		 * of unnecessary PTE pages just to set the protection.
976 		 */
977 		if (!pmap_ste_v(pmap_ste(pmap, va))) {
978 			/* XXX: avoid address wrap around */
979 			if (va >= hp300_trunc_seg((vm_offset_t)-1))
980 				break;
981 			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
982 			pte = pmap_pte(pmap, va);
983 			pte += hppagesperpage;
984 			continue;
985 		}
986 		/*
987 		 * Page not valid.  Again, skip it.
988 		 * Should we do this?  Or set protection anyway?
989 		 */
990 		if (!pmap_pte_v(pte)) {
991 			pte += hppagesperpage;
992 			continue;
993 		}
994 		/*
995 		 * Flush VAC to ensure we get correct state of HW bits
996 		 * so we don't clobber them.
997 		 */
998 		if (firstpage && pmap_aliasmask) {
999 			firstpage = FALSE;
1000 			DCIS();
1001 		}
1002 		ix = 0;
1003 		do {
1004 			/* clear VAC here if PG_RO? */
1005 			pmap_pte_set_prot(pte++, hpprot);
1006 			TBIS(va + ix * HP_PAGE_SIZE);
1007 		} while (++ix != hppagesperpage);
1008 	}
1009 #ifdef DEBUG
1010 	if (hpprot && (pmapvacflush & PVF_PROTECT)) {
1011 		if (pmapvacflush & PVF_TOTAL)
1012 			DCIA();
1013 		else if (pmap == kernel_pmap)
1014 			DCIS();
1015 		else
1016 			DCIU();
1017 	}
1018 #endif
1019 }
1020 
1021 /*
1022  *	Insert the given physical page (p) at
1023  *	the specified virtual address (v) in the
1024  *	target physical map with the protection requested.
1025  *
1026  *	If specified, the page will be wired down, meaning
1027  *	that the related pte can not be reclaimed.
1028  *
1029  *	NB:  This is the only routine which MAY NOT lazy-evaluate
1030  *	or lose information.  That is, this routine must actually
1031  *	insert this page into the given map NOW.
1032  */
1033 void
1034 pmap_enter(pmap, va, pa, prot, wired)
1035 	register pmap_t pmap;
1036 	vm_offset_t va;
1037 	register vm_offset_t pa;
1038 	vm_prot_t prot;
1039 	boolean_t wired;
1040 {
1041 	register pt_entry_t *pte;
1042 	register int npte, ix;
1043 	vm_offset_t opa;
1044 	boolean_t cacheable = TRUE;
1045 	boolean_t checkpv = TRUE;
1046 
1047 #ifdef DEBUG
1048 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1049 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
1050 		       pmap, va, pa, prot, wired);
1051 #endif
1052 	if (pmap == NULL)
1053 		return;
1054 
1055 #ifdef DEBUG
1056 	if (pmap == kernel_pmap)
1057 		enter_stats.kernel++;
1058 	else
1059 		enter_stats.user++;
1060 #endif
1061 	/*
1062 	 * For user mapping, allocate kernel VM resources if necessary.
1063 	 */
1064 	if (pmap->pm_ptab == NULL)
1065 		pmap->pm_ptab = (pt_entry_t *)
1066 			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
1067 
1068 	/*
1069 	 * Segment table entry not valid, we need a new PT page
1070 	 */
1071 	if (!pmap_ste_v(pmap_ste(pmap, va)))
1072 		pmap_enter_ptpage(pmap, va);
1073 
1074 	pte = pmap_pte(pmap, va);
1075 	opa = pmap_pte_pa(pte);
1076 #ifdef DEBUG
1077 	if (pmapdebug & PDB_ENTER)
1078 		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
1079 #endif
1080 
1081 	/*
1082 	 * Mapping has not changed, must be protection or wiring change.
1083 	 */
1084 	if (opa == pa) {
1085 #ifdef DEBUG
1086 		enter_stats.pwchange++;
1087 #endif
1088 		/*
1089 		 * Wiring change, just update stats.
1090 		 * We don't worry about wiring PT pages as they remain
1091 		 * resident as long as there are valid mappings in them.
1092 		 * Hence, if a user page is wired, the PT page will be also.
1093 		 */
1094 		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1095 #ifdef DEBUG
1096 			if (pmapdebug & PDB_ENTER)
1097 				printf("enter: wiring change -> %x\n", wired);
1098 #endif
1099 			if (wired)
1100 				pmap->pm_stats.wired_count++;
1101 			else
1102 				pmap->pm_stats.wired_count--;
1103 #ifdef DEBUG
1104 			enter_stats.wchange++;
1105 #endif
1106 		}
1107 		/*
1108 		 * Retain cache inhibition status
1109 		 */
1110 		checkpv = FALSE;
1111 		if (pmap_pte_ci(pte))
1112 			cacheable = FALSE;
1113 		goto validate;
1114 	}
1115 
1116 	/*
1117 	 * Mapping has changed, invalidate old range and fall through to
1118 	 * handle validating new mapping.
1119 	 */
1120 	if (opa) {
1121 #ifdef DEBUG
1122 		if (pmapdebug & PDB_ENTER)
1123 			printf("enter: removing old mapping %x\n", va);
1124 #endif
1125 		pmap_remove(pmap, va, va + PAGE_SIZE);
1126 #ifdef DEBUG
1127 		enter_stats.mchange++;
1128 #endif
1129 	}
1130 
1131 	/*
1132 	 * If this is a new user mapping, increment the wiring count
1133 	 * on this PT page.  PT pages are wired down as long as there
1134 	 * is a valid mapping in the page.
1135 	 */
1136 	if (pmap != kernel_pmap)
1137 		vm_map_pageable(pt_map, trunc_page(pte),
1138 				round_page(pte+1), FALSE);
1139 
1140 	/*
1141 	 * Enter on the PV list if part of our managed memory
1142 	 * Note that we raise IPL while manipulating pv_table
1143 	 * since pmap_enter can be called at interrupt time.
1144 	 */
1145 	if (pa >= vm_first_phys && pa < vm_last_phys) {
1146 		register pv_entry_t pv, npv;
1147 		int s;
1148 
1149 #ifdef DEBUG
1150 		enter_stats.managed++;
1151 #endif
1152 		pv = pa_to_pvh(pa);
1153 		s = splimp();
1154 #ifdef DEBUG
1155 		if (pmapdebug & PDB_ENTER)
1156 			printf("enter: pv at %x: %x/%x/%x\n",
1157 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1158 #endif
1159 		/*
1160 		 * No entries yet, use header as the first entry
1161 		 */
1162 		if (pv->pv_pmap == NULL) {
1163 #ifdef DEBUG
1164 			enter_stats.firstpv++;
1165 #endif
1166 			pv->pv_va = va;
1167 			pv->pv_pmap = pmap;
1168 			pv->pv_next = NULL;
1169 			pv->pv_ptste = NULL;
1170 			pv->pv_ptpmap = NULL;
1171 			pv->pv_flags = 0;
1172 		}
1173 		/*
1174 		 * There is at least one other VA mapping this page.
1175 		 * Place this entry after the header.
1176 		 */
1177 		else {
1178 #ifdef DEBUG
1179 			for (npv = pv; npv; npv = npv->pv_next)
1180 				if (pmap == npv->pv_pmap && va == npv->pv_va)
1181 					panic("pmap_enter: already in pv_tab");
1182 #endif
1183 			npv = (pv_entry_t)
1184 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
1185 			npv->pv_va = va;
1186 			npv->pv_pmap = pmap;
1187 			npv->pv_next = pv->pv_next;
1188 			npv->pv_ptste = NULL;
1189 			npv->pv_ptpmap = NULL;
1190 			pv->pv_next = npv;
1191 #ifdef DEBUG
1192 			if (!npv->pv_next)
1193 				enter_stats.secondpv++;
1194 #endif
1195 			/*
1196 			 * Since there is another logical mapping for the
1197 			 * same page we may need to cache-inhibit the
1198 			 * descriptors on those CPUs with external VACs.
1199 			 * We don't need to CI if:
1200 			 *
1201 			 * - No two mappings belong to the same user pmap.
1202 			 *   Since the cache is flushed on context switches
1203 			 *   there is no problem between user processes.
1204 			 *
1205 			 * - Mappings within a single pmap are a certain
1206 			 *   magic distance apart.  VAs at these appropriate
1207 			 *   boundaries map to the same cache entries or
1208 			 *   otherwise don't conflict.
1209 			 *
1210 			 * To keep it simple, we only check for these special
1211 			 * cases if there are only two mappings, otherwise we
1212 			 * punt and always CI.
1213 			 *
1214 			 * Note that there are no aliasing problems with the
1215 			 * on-chip data-cache when the WA bit is set.
1216 			 */
1217 			if (pmap_aliasmask) {
1218 				if (pv->pv_flags & PV_CI) {
1219 #ifdef DEBUG
1220 					if (pmapdebug & PDB_CACHE)
1221 					printf("enter: pa %x already CI'ed\n",
1222 					       pa);
1223 #endif
1224 					checkpv = cacheable = FALSE;
1225 				} else if (npv->pv_next ||
1226 					   ((pmap == pv->pv_pmap ||
1227 					     pmap == kernel_pmap ||
1228 					     pv->pv_pmap == kernel_pmap) &&
1229 					    ((pv->pv_va & pmap_aliasmask) !=
1230 					     (va & pmap_aliasmask)))) {
1231 #ifdef DEBUG
1232 					if (pmapdebug & PDB_CACHE)
1233 					printf("enter: pa %x CI'ing all\n",
1234 					       pa);
1235 #endif
1236 					cacheable = FALSE;
1237 					pv->pv_flags |= PV_CI;
1238 #ifdef DEBUG
1239 					enter_stats.ci++;
1240 #endif
1241 				}
1242 			}
1243 		}
1244 		splx(s);
1245 	}
1246 	/*
1247 	 * Assumption: if it is not part of our managed memory
1248 	 * then it must be device memory which may be volatile.
1249 	 */
1250 	else if (pmap_initialized) {
1251 		checkpv = cacheable = FALSE;
1252 #ifdef DEBUG
1253 		enter_stats.unmanaged++;
1254 #endif
1255 	}
1256 
1257 	/*
1258 	 * Increment counters
1259 	 */
1260 	pmap->pm_stats.resident_count++;
1261 	if (wired)
1262 		pmap->pm_stats.wired_count++;
1263 
1264 validate:
1265 	/*
1266 	 * Flush VAC to ensure we get correct state of HW bits
1267 	 * so we don't clobber them.
1268 	 */
1269 	if (pmap_aliasmask)
1270 		DCIS();
1271 	/*
1272 	 * Now validate mapping with desired protection/wiring.
1273 	 * Assume uniform modified and referenced status for all
1274 	 * HP pages in a MACH page.
1275 	 */
1276 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1277 	npte |= (*(int *)pte & (PG_M|PG_U));
1278 	if (wired)
1279 		npte |= PG_W;
1280 	if (!checkpv && !cacheable)
1281 		npte |= PG_CI;
1282 #ifdef DEBUG
1283 	if (pmapdebug & PDB_ENTER)
1284 		printf("enter: new pte value %x\n", npte);
1285 #endif
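	/*
	 * Write one PTE per HP page making up the VM page; npte and va are
	 * advanced by HP_PAGE_SIZE each time so consecutive HP pages map
	 * consecutive physical addresses, flushing each stale TLB entry as
	 * we go.
	 */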
1286 	ix = 0;
1287 	do {
1288 		*(int *)pte++ = npte;
1289 		TBIS(va);
1290 		npte += HP_PAGE_SIZE;
1291 		va += HP_PAGE_SIZE;
1292 	} while (++ix != hppagesperpage);
1293 	/*
1294 	 * The following is executed if we are entering a second
1295 	 * (or greater) mapping for a physical page and the mappings
1296 	 * may create an aliasing problem.  In this case we must
1297 	 * cache inhibit the descriptors involved and flush any
1298 	 * external VAC.
1299 	 */
1300 	if (checkpv && !cacheable) {
1301 		pmap_changebit(pa, PG_CI, TRUE);
1302 		DCIA();
1303 #ifdef DEBUG
1304 		enter_stats.flushes++;
1305 #endif
1306 #ifdef DEBUG
1307 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1308 		    (PDB_CACHE|PDB_PVDUMP))
1309 			pmap_pvdump(pa);
1310 #endif
1311 	}
1312 #ifdef DEBUG
1313 	else if (pmapvacflush & PVF_ENTER) {
1314 		if (pmapvacflush & PVF_TOTAL)
1315 			DCIA();
1316 		else if (pmap == kernel_pmap)
1317 			DCIS();
1318 		else
1319 			DCIU();
1320 	}
1321 	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
1322 		va -= PAGE_SIZE;
1323 		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
1324 	}
1325 #endif
1326 }
1327 
1328 /*
1329  *	Routine:	pmap_change_wiring
1330  *	Function:	Change the wiring attribute for a map/virtual-address
1331  *			pair.
1332  *	In/out conditions:
1333  *			The mapping must already exist in the pmap.
1334  */
1335 void
1336 pmap_change_wiring(pmap, va, wired)
1337 	register pmap_t	pmap;
1338 	vm_offset_t	va;
1339 	boolean_t	wired;
1340 {
1341 	register pt_entry_t *pte;
1342 	register int ix;
1343 
1344 #ifdef DEBUG
1345 	if (pmapdebug & PDB_FOLLOW)
1346 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1347 #endif
1348 	if (pmap == NULL)
1349 		return;
1350 
1351 	pte = pmap_pte(pmap, va);
1352 #ifdef DEBUG
1353 	/*
1354 	 * Page table page is not allocated.
1355 	 * Should this ever happen?  Ignore it for now,
1356 	 * we don't want to force allocation of unnecessary PTE pages.
1357 	 */
1358 	if (!pmap_ste_v(pmap_ste(pmap, va))) {
1359 		if (pmapdebug & PDB_PARANOIA)
1360 			printf("pmap_change_wiring: invalid STE for %x\n", va);
1361 		return;
1362 	}
1363 	/*
1364 	 * Page not valid.  Should this ever happen?
1365 	 * Just continue and change wiring anyway.
1366 	 */
1367 	if (!pmap_pte_v(pte)) {
1368 		if (pmapdebug & PDB_PARANOIA)
1369 			printf("pmap_change_wiring: invalid PTE for %x\n", va);
1370 	}
1371 #endif
1372 	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1373 		if (wired)
1374 			pmap->pm_stats.wired_count++;
1375 		else
1376 			pmap->pm_stats.wired_count--;
1377 	}
1378 	/*
1379 	 * Wiring is not a hardware characteristic so there is no need
1380 	 * to invalidate TLB.
1381 	 */
1382 	ix = 0;
1383 	do {
1384 		pmap_pte_set_w(pte++, wired);
1385 	} while (++ix != hppagesperpage);
1386 }
1387 
1388 /*
1389  *	Routine:	pmap_extract
1390  *	Function:
1391  *		Extract the physical page address associated
1392  *		with the given map/virtual_address pair.
1393  */
1394 
1395 vm_offset_t
1396 pmap_extract(pmap, va)
1397 	register pmap_t	pmap;
1398 	vm_offset_t va;
1399 {
1400 	register vm_offset_t pa;
1401 
1402 #ifdef DEBUG
1403 	if (pmapdebug & PDB_FOLLOW)
1404 		printf("pmap_extract(%x, %x) -> ", pmap, va);
1405 #endif
1406 	pa = 0;
1407 	if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
1408 		pa = *(int *)pmap_pte(pmap, va);
1409 	if (pa)
1410 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1411 #ifdef DEBUG
1412 	if (pmapdebug & PDB_FOLLOW)
1413 		printf("%x\n", pa);
1414 #endif
1415 	return(pa);
1416 }
1417 
1418 /*
1419  *	Copy the range specified by src_addr/len
1420  *	from the source map to the range dst_addr/len
1421  *	in the destination map.
1422  *
1423  *	This routine is only advisory and need not do anything.
1424  */
1425 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1426 	pmap_t		dst_pmap;
1427 	pmap_t		src_pmap;
1428 	vm_offset_t	dst_addr;
1429 	vm_size_t	len;
1430 	vm_offset_t	src_addr;
1431 {
1432 #ifdef DEBUG
1433 	if (pmapdebug & PDB_FOLLOW)
1434 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1435 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1436 #endif
1437 }
1438 
1439 /*
1440  *	Require that all active physical maps contain no
1441  *	incorrect entries NOW.  [This update includes
1442  *	forcing updates of any address map caching.]
1443  *
1444  *	Generally used to ensure that a thread about
1445  *	to run will see a semantically correct world.
1446  */
1447 void pmap_update()
1448 {
1449 #ifdef DEBUG
1450 	if (pmapdebug & PDB_FOLLOW)
1451 		printf("pmap_update()\n");
1452 #endif
1453 	TBIA();
1454 }
1455 
1456 /*
1457  *	Routine:	pmap_collect
1458  *	Function:
1459  *		Garbage collects the physical map system for
1460  *		pages which are no longer used.
1461  *		Success need not be guaranteed -- that is, there
1462  *		may well be pages which are not referenced, but
1463  *		others may be collected.
1464  *	Usage:
1465  *		Called by the pageout daemon when pages are scarce.
1466  */
1467 void
1468 pmap_collect(pmap)
1469 	pmap_t		pmap;
1470 {
1471 	register vm_offset_t pa;
1472 	register pv_entry_t pv;
1473 	register int *pte;
1474 	vm_offset_t kpa;
1475 	int s;
1476 
1477 #ifdef DEBUG
1478 	int *ste;
1479 	int opmapdebug;
1480 #endif
1481 	if (pmap != kernel_pmap)
1482 		return;
1483 
1484 #ifdef DEBUG
1485 	if (pmapdebug & PDB_FOLLOW)
1486 		printf("pmap_collect(%x)\n", pmap);
1487 	kpt_stats.collectscans++;
1488 #endif
1489 	s = splimp();
1490 	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
1491 		register struct kpt_page *kpt, **pkpt;
1492 
1493 		/*
1494 		 * Locate physical pages which are being used as kernel
1495 		 * page table pages.
1496 		 */
1497 		pv = pa_to_pvh(pa);
1498 		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
1499 			continue;
1500 		do {
1501 			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
1502 				break;
1503 		} while (pv = pv->pv_next);
1504 		if (pv == NULL)
1505 			continue;
1506 #ifdef DEBUG
1507 		if (pv->pv_va < (vm_offset_t)Sysmap ||
1508 		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
1509 			printf("collect: kernel PT VA out of range\n");
1510 		else
1511 			goto ok;
1512 		pmap_pvdump(pa);
1513 		continue;
1514 ok:
1515 #endif
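		/*
		 * Scan the PT page backwards for any valid PTE; if one is
		 * found the page is still in use and is left alone.
		 */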
1516 		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
1517 		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
1518 			;
1519 		if (pte >= (int *)pv->pv_va)
1520 			continue;
1521 
1522 #ifdef DEBUG
1523 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1524 			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
1525 			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
1526 			opmapdebug = pmapdebug;
1527 			pmapdebug |= PDB_PTPAGE;
1528 		}
1529 
1530 		ste = (int *)pv->pv_ptste;
1531 #endif
1532 		/*
1533 		 * If all entries were invalid we can remove the page.
1534 		 * We call pmap_remove to take care of invalidating ST
1535 		 * and Sysptmap entries.
1536 		 */
1537 		kpa = pmap_extract(pmap, pv->pv_va);
1538 		pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
1539 		/*
1540 		 * Use the physical address to locate the original
1541 		 * (kmem_alloc assigned) address for the page and put
1542 		 * that page back on the free list.
1543 		 */
1544 		for (pkpt = &kpt_used_list, kpt = *pkpt;
1545 		     kpt != (struct kpt_page *)0;
1546 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
1547 			if (kpt->kpt_pa == kpa)
1548 				break;
1549 #ifdef DEBUG
1550 		if (kpt == (struct kpt_page *)0)
1551 			panic("pmap_collect: lost a KPT page");
1552 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1553 			printf("collect: %x (%x) to free list\n",
1554 			       kpt->kpt_va, kpa);
1555 #endif
1556 		*pkpt = kpt->kpt_next;
1557 		kpt->kpt_next = kpt_free_list;
1558 		kpt_free_list = kpt;
1559 #ifdef DEBUG
1560 		kpt_stats.kptinuse--;
1561 		kpt_stats.collectpages++;
1562 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1563 			pmapdebug = opmapdebug;
1564 
1565 		if (*ste)
1566 			printf("collect: kernel STE at %x still valid (%x)\n",
1567 			       ste, *ste);
1568 		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
1569 		if (*ste)
1570 			printf("collect: kernel PTmap at %x still valid (%x)\n",
1571 			       ste, *ste);
1572 #endif
1573 	}
1574 	splx(s);
1575 }
1576 
1577 void
1578 pmap_activate(pmap, pcbp)
1579 	register pmap_t pmap;
1580 	struct pcb *pcbp;
1581 {
1582 #ifdef DEBUG
1583 	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
1584 		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
1585 #endif
1586 	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
1587 }
1588 
1589 /*
1590  *	pmap_zero_page zeros the specified (machine independent)
1591  *	page by mapping the page into virtual memory and using
1592  *	bzero to clear its contents, one machine dependent page
1593  *	at a time.
1594  */
1595 void
1596 pmap_zero_page(phys)
1597 	register vm_offset_t	phys;
1598 {
1599 	register int ix;
1600 
1601 #ifdef DEBUG
1602 	if (pmapdebug & PDB_FOLLOW)
1603 		printf("pmap_zero_page(%x)\n", phys);
1604 #endif
1605 	phys >>= PG_SHIFT;
1606 	ix = 0;
1607 	do {
1608 		clearseg(phys++);
1609 	} while (++ix != hppagesperpage);
1610 }
1611 
1612 /*
1613  *	pmap_copy_page copies the specified (machine independent)
1614  *	page by mapping the page into virtual memory and using
1615  *	bcopy to copy the page, one machine dependent page at a
1616  *	time.
1617  */
1618 void
1619 pmap_copy_page(src, dst)
1620 	register vm_offset_t	src, dst;
1621 {
1622 	register int ix;
1623 
1624 #ifdef DEBUG
1625 	if (pmapdebug & PDB_FOLLOW)
1626 		printf("pmap_copy_page(%x, %x)\n", src, dst);
1627 #endif
1628 	src >>= PG_SHIFT;
1629 	dst >>= PG_SHIFT;
1630 	ix = 0;
1631 	do {
1632 		physcopyseg(src++, dst++);
1633 	} while (++ix != hppagesperpage);
1634 }
1635 
1636 
1637 /*
1638  *	Routine:	pmap_pageable
1639  *	Function:
1640  *		Make the specified pages (by pmap, offset)
1641  *		pageable (or not) as requested.
1642  *
1643  *		A page which is not pageable may not take
1644  *		a fault; therefore, its page table entry
1645  *		must remain valid for the duration.
1646  *
1647  *		This routine is merely advisory; pmap_enter
1648  *		will specify that these pages are to be wired
1649  *		down (or not) as appropriate.
1650  */
1651 void
1652 pmap_pageable(pmap, sva, eva, pageable)
1653 	pmap_t		pmap;
1654 	vm_offset_t	sva, eva;
1655 	boolean_t	pageable;
1656 {
1657 #ifdef DEBUG
1658 	if (pmapdebug & PDB_FOLLOW)
1659 		printf("pmap_pageable(%x, %x, %x, %x)\n",
1660 		       pmap, sva, eva, pageable);
1661 #endif
1662 	/*
1663 	 * If we are making a PT page pageable then all valid
1664 	 * mappings must be gone from that page.  Hence it should
1665 	 * be all zeros and there is no need to clean it.
1666 	 * Assumptions:
1667 	 *	- we are called with only one page at a time
1668 	 *	- PT pages have only one pv_table entry
1669 	 */
1670 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1671 		register pv_entry_t pv;
1672 		register vm_offset_t pa;
1673 
1674 #ifdef DEBUG
1675 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1676 			printf("pmap_pageable(%x, %x, %x, %x)\n",
1677 			       pmap, sva, eva, pageable);
1678 #endif
1679 		if (!pmap_ste_v(pmap_ste(pmap, sva)))
1680 			return;
1681 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1682 		if (pa < vm_first_phys || pa >= vm_last_phys)
1683 			return;
1684 		pv = pa_to_pvh(pa);
1685 		if (pv->pv_ptste == NULL)
1686 			return;
1687 #ifdef DEBUG
1688 		if (pv->pv_va != sva || pv->pv_next) {
1689 			printf("pmap_pageable: bad PT page va %x next %x\n",
1690 			       pv->pv_va, pv->pv_next);
1691 			return;
1692 		}
1693 #endif
1694 		/*
1695 		 * Mark it unmodified to avoid pageout
1696 		 */
1697 		pmap_changebit(pa, PG_M, FALSE);
1698 #ifdef DEBUG
1699 		if (pmapdebug & PDB_PTPAGE)
1700 			printf("pmap_pageable: PT page %x(%x) unmodified\n",
1701 			       sva, *(int *)pmap_pte(pmap, sva));
1702 		if (pmapdebug & PDB_WIRING)
1703 			pmap_check_wiring("pageable", sva);
1704 #endif
1705 	}
1706 }
1707 
1708 /*
1709  *	Clear the modify bits on the specified physical page.
1710  */
1711 
1712 void
1713 pmap_clear_modify(pa)
1714 	vm_offset_t	pa;
1715 {
1716 #ifdef DEBUG
1717 	if (pmapdebug & PDB_FOLLOW)
1718 		printf("pmap_clear_modify(%x)\n", pa);
1719 #endif
1720 	pmap_changebit(pa, PG_M, FALSE);
1721 }
1722 
1723 /*
1724  *	pmap_clear_reference:
1725  *
1726  *	Clear the reference bit on the specified physical page.
1727  */
1728 
1729 void pmap_clear_reference(pa)
1730 	vm_offset_t	pa;
1731 {
1732 #ifdef DEBUG
1733 	if (pmapdebug & PDB_FOLLOW)
1734 		printf("pmap_clear_reference(%x)\n", pa);
1735 #endif
1736 	pmap_changebit(pa, PG_U, FALSE);
1737 }
1738 
1739 /*
1740  *	pmap_is_referenced:
1741  *
1742  *	Return whether or not the specified physical page is referenced
1743  *	by any physical maps.
1744  */
1745 
1746 boolean_t
1747 pmap_is_referenced(pa)
1748 	vm_offset_t	pa;
1749 {
1750 #ifdef DEBUG
1751 	if (pmapdebug & PDB_FOLLOW) {
1752 		boolean_t rv = pmap_testbit(pa, PG_U);
1753 		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
1754 		return(rv);
1755 	}
1756 #endif
1757 	return(pmap_testbit(pa, PG_U));
1758 }
1759 
1760 /*
1761  *	pmap_is_modified:
1762  *
1763  *	Return whether or not the specified physical page is modified
1764  *	by any physical maps.
1765  */
1766 
1767 boolean_t
1768 pmap_is_modified(pa)
1769 	vm_offset_t	pa;
1770 {
1771 #ifdef DEBUG
1772 	if (pmapdebug & PDB_FOLLOW) {
1773 		boolean_t rv = pmap_testbit(pa, PG_M);
1774 		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
1775 		return(rv);
1776 	}
1777 #endif
1778 	return(pmap_testbit(pa, PG_M));
1779 }
1780 
1781 vm_offset_t
1782 pmap_phys_address(ppn)
1783 	int ppn;
1784 {
1785 	return(hp300_ptob(ppn));
1786 }
1787 
1788 /*
1789  * Miscellaneous support routines follow
1790  */
1791 
1792 /* static */
1793 hp300_protection_init()
1794 {
1795 	register int *kp, prot;
1796 
1797 	kp = protection_codes;
1798 	for (prot = 0; prot < 8; prot++) {
1799 		switch (prot) {
1800 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1801 			*kp++ = 0;
1802 			break;
1803 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1804 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1805 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1806 			*kp++ = PG_RO;
1807 			break;
1808 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1809 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1810 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1811 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1812 			*kp++ = PG_RW;
1813 			break;
1814 		}
1815 	}
1816 }
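/*
 * For example, pte_prot(pmap, VM_PROT_READ|VM_PROT_EXECUTE) yields PG_RO
 * while any combination that includes VM_PROT_WRITE yields PG_RW.
 */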
1817 
1818 /* static */
1819 boolean_t
1820 pmap_testbit(pa, bit)
1821 	register vm_offset_t pa;
1822 	int bit;
1823 {
1824 	register pv_entry_t pv;
1825 	register int *pte, ix;
1826 	int s;
1827 
1828 	if (pa < vm_first_phys || pa >= vm_last_phys)
1829 		return(FALSE);
1830 
1831 	pv = pa_to_pvh(pa);
1832 	s = splimp();
1833 	/*
1834 	 * Check saved info first
1835 	 */
1836 	if (pmap_attributes[pa_index(pa)] & bit) {
1837 		splx(s);
1838 		return(TRUE);
1839 	}
1840 	/*
1841 	 * Flush VAC to get correct state of any hardware maintained bits.
1842 	 */
1843 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
1844 		DCIS();
1845 	/*
1846 	 * Not found, check current mappings returning
1847 	 * immediately if found.
1848 	 */
1849 	if (pv->pv_pmap != NULL) {
1850 		for (; pv; pv = pv->pv_next) {
1851 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1852 			ix = 0;
1853 			do {
1854 				if (*pte++ & bit) {
1855 					splx(s);
1856 					return(TRUE);
1857 				}
1858 			} while (++ix != hppagesperpage);
1859 		}
1860 	}
1861 	splx(s);
1862 	return(FALSE);
1863 }
1864 
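/*
 * Set or clear the given PTE bit in every current mapping of the page:
 * e.g. pmap_page_protect() uses pmap_changebit(pa, PG_RO, TRUE) to
 * write-protect a page and pmap_clear_modify() uses it to clear PG_M.
 */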
1865 /* static */
1866 pmap_changebit(pa, bit, setem)
1867 	register vm_offset_t pa;
1868 	int bit;
1869 	boolean_t setem;
1870 {
1871 	register pv_entry_t pv;
1872 	register int *pte, npte, ix;
1873 	vm_offset_t va;
1874 	int s;
1875 	boolean_t firstpage = TRUE;
1876 
1877 #ifdef DEBUG
1878 	if (pmapdebug & PDB_BITS)
1879 		printf("pmap_changebit(%x, %x, %s)\n",
1880 		       pa, bit, setem ? "set" : "clear");
1881 #endif
1882 	if (pa < vm_first_phys || pa >= vm_last_phys)
1883 		return;
1884 
1885 	pv = pa_to_pvh(pa);
1886 	s = splimp();
1887 	/*
1888 	 * Clear saved attributes (modify, reference)
1889 	 */
1890 	if (!setem)
1891 		pmap_attributes[pa_index(pa)] &= ~bit;
1892 	/*
1893 	 * Loop over all current mappings setting/clearing as appropriate
1894 	 * If setting RO do we need to clear the VAC?
1895 	 */
1896 	if (pv->pv_pmap != NULL) {
1897 #ifdef DEBUG
1898 		int toflush = 0;
1899 #endif
1900 		for (; pv; pv = pv->pv_next) {
1901 #ifdef DEBUG
1902 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1903 #endif
1904 			va = pv->pv_va;
1905 
1906 			/*
1907 			 * XXX don't write protect pager mappings
1908 			 */
1909 			if (bit == PG_RO) {
1910 				extern vm_offset_t pager_sva, pager_eva;
1911 
1912 				if (va >= pager_sva && va < pager_eva)
1913 					continue;
1914 			}
1915 
1916 			pte = (int *) pmap_pte(pv->pv_pmap, va);
1917 			/*
1918 			 * Flush VAC to ensure we get correct state of HW bits
1919 			 * so we don't clobber them.
1920 			 */
1921 			if (firstpage && pmap_aliasmask) {
1922 				firstpage = FALSE;
1923 				DCIS();
1924 			}
1925 			ix = 0;
1926 			do {
1927 				if (setem)
1928 					npte = *pte | bit;
1929 				else
1930 					npte = *pte & ~bit;
1931 				if (*pte != npte) {
1932 					*pte = npte;
1933 					TBIS(va);
1934 				}
1935 				va += HP_PAGE_SIZE;
1936 				pte++;
1937 			} while (++ix != hppagesperpage);
1938 		}
1939 #ifdef DEBUG
1940 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1941 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1942 				DCIA();
1943 			else if (toflush == 2)
1944 				DCIS();
1945 			else
1946 				DCIU();
1947 		}
1948 #endif
1949 	}
1950 	splx(s);
1951 }
1952 
1953 /* static */
1954 void
1955 pmap_enter_ptpage(pmap, va)
1956 	register pmap_t pmap;
1957 	register vm_offset_t va;
1958 {
1959 	register vm_offset_t ptpa;
1960 	register pv_entry_t pv;
1961 	st_entry_t *ste;
1962 	int s;
1963 
1964 #ifdef DEBUG
1965 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
1966 		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
1967 	enter_stats.ptpneeded++;
1968 #endif
1969 	/*
1970 	 * Allocate a segment table if necessary.  Note that it is allocated
1971 	 * from kernel_map and not pt_map.  This keeps user page tables
1972 	 * aligned on segment boundaries in the kernel address space.
1973 	 * The segment table is wired down.  It will be freed whenever the
1974 	 * reference count drops to zero.
1975 	 */
1976 	if (pmap->pm_stab == Segtabzero) {
1977 		pmap->pm_stab = (st_entry_t *)
1978 			kmem_alloc(kernel_map, HP_STSIZE);
1979 		pmap->pm_stchanged = TRUE;
1980 		/*
1981 		 * XXX may have changed segment table pointer for current
1982 		 * process so update now to reload hardware.
1983 		 */
1984 		if (pmap == curproc->p_vmspace->vm_map.pmap)
1985 			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
1986 #ifdef DEBUG
1987 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
1988 			printf("enter: pmap %x stab %x\n",
1989 			       pmap, pmap->pm_stab);
1990 #endif
1991 	}
1992 
1993 	ste = pmap_ste(pmap, va);
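	/*
	 * From here on "va" is reused to mean the virtual address of the
	 * page table page itself (within Sysmap for the kernel pmap, within
	 * the pt_map-allocated pm_ptab for user pmaps) that will map the
	 * original va.
	 */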
1994 	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
1995 
1996 	/*
1997 	 * In the kernel we allocate a page from the kernel PT page
1998 	 * free list and map it into the kernel page table map (via
1999 	 * pmap_enter).
2000 	 */
2001 	if (pmap == kernel_pmap) {
2002 		register struct kpt_page *kpt;
2003 
2004 		s = splimp();
2005 		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
2006 			/*
2007 			 * No PT pages available.
2008 			 * Try once to free up unused ones.
2009 			 */
2010 #ifdef DEBUG
2011 			if (pmapdebug & PDB_COLLECT)
2012 				printf("enter: no KPT pages, collecting...\n");
2013 #endif
2014 			pmap_collect(kernel_pmap);
2015 			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
2016 				panic("pmap_enter_ptpage: can't get KPT page");
2017 		}
2018 #ifdef DEBUG
2019 		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
2020 			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
2021 #endif
2022 		kpt_free_list = kpt->kpt_next;
2023 		kpt->kpt_next = kpt_used_list;
2024 		kpt_used_list = kpt;
2025 		ptpa = kpt->kpt_pa;
2026 		bzero(kpt->kpt_va, HP_PAGE_SIZE);
2027 		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
2028 #ifdef DEBUG
2029 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2030 			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
2031 			       ste - pmap_ste(pmap, 0),
2032 			       *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
2033 			       kpt->kpt_va);
2034 #endif
2035 		splx(s);
2036 	}
2037 	/*
2038 	 * For user processes we just simulate a fault on that location
2039 	 * letting the VM system allocate a zero-filled page.
2040 	 */
2041 	else {
2042 #ifdef DEBUG
2043 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2044 			printf("enter: about to fault UPT pg at %x\n", va);
2045 #endif
2046 		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
2047 		    != KERN_SUCCESS)
2048 			panic("pmap_enter: vm_fault failed");
2049 		ptpa = pmap_extract(kernel_pmap, va);
2050 #ifdef DEBUG
2051 		PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
2052 #endif
2053 	}
2054 
2055 	/*
2056 	 * Locate the PV entry in the kernel for this PT page and
2057 	 * record the STE address.  This is so that we can invalidate
2058 	 * the STE when we remove the mapping for the page.
2059 	 */
2060 	pv = pa_to_pvh(ptpa);
2061 	s = splimp();
2062 	if (pv) {
2063 		pv->pv_flags |= PV_PTPAGE;
2064 		do {
2065 			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
2066 				break;
2067 		} while (pv = pv->pv_next);
2068 	}
2069 #ifdef DEBUG
2070 	if (pv == NULL)
2071 		panic("pmap_enter_ptpage: PT page not entered");
2072 #endif
2073 	pv->pv_ptste = ste;
2074 	pv->pv_ptpmap = pmap;
2075 #ifdef DEBUG
2076 	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2077 		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
2078 #endif
2079 
2080 	/*
2081 	 * Map the new PT page into the segment table.
2082 	 * Also increment the reference count on the segment table if this
2083 	 * was a user page table page.  Note that we don't use vm_map_pageable
2084 	 * to keep the count like we do for PT pages, this is mostly because
2085 	 * it would be difficult to identify ST pages in pmap_pageable to
2086 	 * release them.  We also avoid the overhead of vm_map_pageable.
2087 	 */
2088 	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2089 	if (pmap != kernel_pmap) {
2090 		pmap->pm_sref++;
2091 #ifdef DEBUG
2092 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2093 			printf("enter: stab %x refcnt %d\n",
2094 			       pmap->pm_stab, pmap->pm_sref);
2095 #endif
2096 	}
2097 	/*
2098 	 * Flush stale TLB info.
2099 	 */
2100 	if (pmap == kernel_pmap)
2101 		TBIAS();
2102 	else
2103 		TBIAU();
2104 	pmap->pm_ptpages++;
2105 	splx(s);
2106 }
2107 
2108 #ifdef DEBUG
2109 pmap_pvdump(pa)
2110 	vm_offset_t pa;
2111 {
2112 	register pv_entry_t pv;
2113 
2114 	printf("pa %x", pa);
2115 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
2116 		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
2117 		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
2118 		       pv->pv_flags);
2119 	printf("\n");
2120 }
2121 
2122 pmap_check_wiring(str, va)
2123 	char *str;
2124 	vm_offset_t va;
2125 {
2126 	vm_map_entry_t entry;
2127 	register int count, *pte;
2128 
2129 	va = trunc_page(va);
2130 	if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
2131 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
2132 		return;
2133 
2134 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
2135 		printf("wired_check: entry for %x not found\n", va);
2136 		return;
2137 	}
2138 	count = 0;
2139 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
2140 		if (*pte)
2141 			count++;
2142 	if (entry->wired_count != count)
2143 		printf("*%s*: %x: w%d/a%d\n",
2144 		       str, va, entry->wired_count, count);
2145 }
2146 #endif
2147