xref: /original-bsd/sys/hp300/hp300/pmap.c (revision ba762ddc)
1 /*
2  * Copyright (c) 1987 Carnegie-Mellon University
3  * Copyright (c) 1991 Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * The Mach Operating System project at Carnegie-Mellon University.
8  *
9  * The CMU software License Agreement specifies the terms and conditions
10  * for use and redistribution.
11  *
12  *	@(#)pmap.c	7.2 (Berkeley) 04/20/91
13  */
14 
15 /*
16  *	HP9000/300 series physical map management code.
17  *	For 68020/68030 machines with HP, 68851, or 68030 MMUs
18  *		(models 320,350,318,319,330,340,360,370,345,375)
19  *	Don't even pay lip service to multiprocessor support.
20  */
21 
22 /*
23  *	Manages physical address maps.
24  *
25  *	In addition to hardware address maps, this
26  *	module is called upon to provide software-use-only
27  *	maps which may or may not be stored in the same
28  *	form as hardware maps.  These pseudo-maps are
29  *	used to store intermediate results from copy
30  *	operations to and from address spaces.
31  *
32  *	Since the information managed by this module is
33  *	also stored by the logical address mapping module,
34  *	this module may throw away valid virtual-to-physical
35  *	mappings at almost any time.  However, invalidations
36  *	of virtual-to-physical mappings must be done as
37  *	requested.
38  *
39  *	In order to cope with hardware architectures which
40  *	make virtual-to-physical map invalidates expensive,
41  *	this module may delay invalidate or reduce-protection
42  *	operations until such time as they are actually
43  *	necessary.  This module is given full information as
44  *	to which processors are currently using which maps,
45  *	and to when physical maps must be made correct.
46  */
47 
48 #include "param.h"
49 #include "proc.h"
50 #include "malloc.h"
51 #include "user.h"
52 
53 #include "pte.h"
54 
55 #include "vm/vm.h"
56 #include "vm/vm_kern.h"
57 #include "vm/vm_page.h"
58 #include "vm/vm_statistics.h"
59 
60 #include "../include/cpu.h"
61 
62 /*
63  * Allocate various and sundry SYSMAPs used in the days of old VM
64  * and not yet converted.  XXX.
65  */
66 #define BSDVM_COMPAT	1
67 
68 #ifdef DEBUG
69 struct {
70 	int collectscans;
71 	int collectpages;
72 	int kpttotal;
73 	int kptinuse;
74 	int kptmaxuse;
75 } kpt_stats;
76 struct {
77 	int kernel;	/* entering kernel mapping */
78 	int user;	/* entering user mapping */
79 	int ptpneeded;	/* needed to allocate a PT page */
80 	int pwchange;	/* no mapping change, just wiring or protection */
81 	int wchange;	/* no mapping change, just wiring */
82 	int mchange;	/* was mapped but mapping to different page */
83 	int managed;	/* a managed page */
84 	int firstpv;	/* first mapping for this PA */
85 	int secondpv;	/* second mapping for this PA */
86 	int ci;		/* cache inhibited */
87 	int unmanaged;	/* not a managed page */
88 	int flushes;	/* cache flushes */
89 } enter_stats;
90 struct {
91 	int calls;
92 	int removes;
93 	int pvfirst;
94 	int pvsearch;
95 	int ptinvalid;
96 	int uflushes;
97 	int sflushes;
98 } remove_stats;
99 
100 int debugmap = 0;
101 int pmapdebug = 0x2000;
102 #define PDB_FOLLOW	0x0001
103 #define PDB_INIT	0x0002
104 #define PDB_ENTER	0x0004
105 #define PDB_REMOVE	0x0008
106 #define PDB_CREATE	0x0010
107 #define PDB_PTPAGE	0x0020
108 #define PDB_CACHE	0x0040
109 #define PDB_BITS	0x0080
110 #define PDB_COLLECT	0x0100
111 #define PDB_PROTECT	0x0200
112 #define PDB_SEGTAB	0x0400
113 #define PDB_PARANOIA	0x2000
114 #define PDB_WIRING	0x4000
115 #define PDB_PVDUMP	0x8000
116 
117 int pmapvacflush = 0;
118 #define	PVF_ENTER	0x01
119 #define	PVF_REMOVE	0x02
120 #define	PVF_PROTECT	0x04
121 #define	PVF_TOTAL	0x80
122 #endif
123 
124 /*
125  * Get STEs and PTEs for user/kernel address space
126  */
127 #define	pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
128 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
129 
130 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
131 
132 #define pmap_ste_v(pte)		((pte)->sg_v)
133 #define pmap_pte_w(pte)		((pte)->pg_w)
134 #define pmap_pte_ci(pte)	((pte)->pg_ci)
135 #define pmap_pte_m(pte)		((pte)->pg_m)
136 #define pmap_pte_u(pte)		((pte)->pg_u)
137 #define pmap_pte_v(pte)		((pte)->pg_v)
138 #define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
139 #define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
140 
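/*
 * Illustrative sketch (compiled out, not part of the original source):
 * how the accessors above combine.  pmap_ste() indexes the segment table
 * by the high-order VA bits, pmap_pte() indexes the page table by page
 * number, and pmap_pte_pa() recovers the frame so that adding back the
 * page offset reconstructs the physical address (cf. pmap_extract below).
 */
#ifdef notdef
vm_offset_t
pmap_va_to_pa_example(pmap, va)	/* hypothetical name, example only */
	pmap_t pmap;
	vm_offset_t va;
{
	if (!pmap_ste_v(pmap_ste(pmap, va)) ||
	    !pmap_pte_v(pmap_pte(pmap, va)))
		return (0);
	return (pmap_pte_pa(pmap_pte(pmap, va)) | (va & ~PG_FRAME));
}
#endif
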
141 /*
142  * Given a map and a machine independent protection code,
143  * convert to an hp300 protection code.
144  */
145 #define pte_prot(m, p)	(protection_codes[p])
146 int	protection_codes[8];
147 
148 /*
149  * Kernel page table page management.
150  */
151 struct kpt_page {
152 	struct kpt_page *kpt_next;	/* link on either used or free list */
153 	vm_offset_t	kpt_va;		/* always valid kernel VA */
154 	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
155 };
156 struct kpt_page *kpt_free_list, *kpt_used_list;
157 struct kpt_page *kpt_pages;
158 
159 /*
160  * Kernel segment/page table and page table map.
161  * The page table map gives us a level of indirection we need to dynamically
162  * expand the page table.  It is essentially a copy of the segment table
163  * with PTEs instead of STEs.  All are initialized in locore at boot time.
164  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
165  * Segtabzero is an empty segment table which all processes share until they
166  * reference something.
167  */
168 st_entry_t	*Sysseg;
169 pt_entry_t	*Sysmap, *Sysptmap;
170 st_entry_t	*Segtabzero;
171 #if BSDVM_COMPAT
172 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
173 #else
174 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
175 #endif
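
/*
 * Sketch of the Sysmap/Sysptmap correspondence described above: for a
 * kernel VA the Sysptmap PTE sits at the same index as the Sysseg STE,
 * i.e. (as pmap_collect does later)
 *
 *	pte = &Sysptmap[pmap_ste(kernel_pmap, va) - pmap_ste(kernel_pmap, 0)];
 *
 * where "pte" and "va" are illustrative names only.
 */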
176 
177 struct pmap	kernel_pmap_store;
178 pmap_t		kernel_pmap;
179 vm_map_t	pt_map;
180 
181 vm_offset_t    	avail_start;	/* PA of first available physical page */
182 vm_offset_t	avail_end;	/* PA of last available physical page */
183 vm_size_t	mem_size;	/* memory size in bytes */
184 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
185 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
186 vm_offset_t	vm_first_phys;	/* PA of first managed page */
187 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
188 int		hppagesperpage;	/* PAGE_SIZE / HP_PAGE_SIZE */
189 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
190 int		pmap_aliasmask;	/* separation at which VA aliasing is ok */
191 char		*pmap_attributes;	/* reference and modify bits */
192 
193 boolean_t	pmap_testbit();
194 void		pmap_enter_ptpage();
195 
196 #if BSDVM_COMPAT
197 #include "msgbuf.h"
198 
199 /*
200  * All those kernel PT submaps that BSD is so fond of
201  */
202 struct pte	*CMAP1, *CMAP2, *mmap;
203 caddr_t		CADDR1, CADDR2, vmmap;
204 struct pte	*msgbufmap;
205 struct msgbuf	*msgbufp;
206 #endif
207 
208 /*
209  *	Bootstrap the system enough to run with virtual memory.
210  *	Map the kernel's code and data, and allocate the system page table.
211  *
212  *	On the HP this is called after mapping has already been enabled
213  *	and just syncs the pmap module with what has already been done.
214  *	[We can't call it easily with mapping off since the kernel is not
215  *	mapped with PA == VA, hence we would have to relocate every address
216  *	from the linked base (virtual) address 0 to the actual (physical)
217  *	address of 0xFFxxxxxx.]
218  */
219 void
220 pmap_bootstrap(firstaddr, loadaddr)
221 	vm_offset_t firstaddr;
222 	vm_offset_t loadaddr;
223 {
224 #if BSDVM_COMPAT
225 	vm_offset_t va;
226 	struct pte *pte;
227 #endif
228 	extern vm_offset_t maxmem, physmem;
229 
230 	avail_start = firstaddr;
231 	avail_end = maxmem << PGSHIFT;
232 
233 	/* XXX: allow for msgbuf */
234 	avail_end -= hp300_round_page(sizeof(struct msgbuf));
235 
236 	mem_size = physmem << PGSHIFT;
237 	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
238 	virtual_end = VM_MAX_KERNEL_ADDRESS;
239 	hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;
240 
241 	/*
242 	 * Determine VA aliasing distance if any
243 	 */
244 	if (ectype == EC_VIRT)
245 		switch (machineid) {
246 		case HP_320:
247 			pmap_aliasmask = 0x3fff;	/* 16k */
248 			break;
249 		case HP_350:
250 			pmap_aliasmask = 0x7fff;	/* 32k */
251 			break;
252 		}
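	/*
	 * (Example of how this mask is consulted later, e.g. in pmap_enter:
	 * two VAs mapping the same PA are treated as VAC-safe only when
	 * (va1 & pmap_aliasmask) == (va2 & pmap_aliasmask); va1/va2 are
	 * illustrative names only.)
	 */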
253 
254 	/*
255 	 * Initialize protection array.
256 	 */
257 	hp300_protection_init();
258 
259 	/*
260 	 * The kernel's pmap is statically allocated so we don't
261 	 * have to use pmap_create, which is unlikely to work
262 	 * correctly at this part of the boot sequence.
263 	 */
264 	kernel_pmap = &kernel_pmap_store;
265 
266 	/*
267 	 * Kernel page/segment table allocated in locore,
268 	 * just initialize pointers.
269 	 */
270 	kernel_pmap->pm_stab = Sysseg;
271 	kernel_pmap->pm_ptab = Sysmap;
272 
273 	simple_lock_init(&kernel_pmap->pm_lock);
274 	kernel_pmap->pm_count = 1;
275 
276 #if BSDVM_COMPAT
277 	/*
278 	 * Allocate all the submaps we need
279 	 */
280 #define	SYSMAP(c, p, v, n)	\
281 	v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);
282 
283 	va = virtual_avail;
284 	pte = pmap_pte(kernel_pmap, va);
285 
286 	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
287 	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
288 	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
289 	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
290 	virtual_avail = va;
291 #endif
292 }
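
/*
 * Minimal sketch (not taken from this file) of how the CMAP1/CADDR1 style
 * pairs set up by SYSMAP() above are traditionally used in BSD kernels:
 * install a PTE for the target physical page, flush the stale TLB entry,
 * then access the page through the reserved VA.  PG_RW/PG_V/PG_NV and
 * TBIS() are as used elsewhere in this file; "pa" is a hypothetical
 * argument.  Compiled out.
 */
#ifdef notdef
void
zero_phys_page_example(pa)	/* hypothetical name, example only */
	vm_offset_t pa;
{
	*(int *)CMAP1 = PG_RW | PG_V | (pa & PG_FRAME);
	TBIS((vm_offset_t)CADDR1);
	bzero(CADDR1, HP_PAGE_SIZE);
	*(int *)CMAP1 = PG_NV;
	TBIS((vm_offset_t)CADDR1);
}
#endif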
293 
294 /*
295  *	Initialize the pmap module.
296  *	Called by vm_init, to initialize any structures that the pmap
297  *	system needs to map virtual memory.
298  */
299 void
300 pmap_init(phys_start, phys_end)
301 	vm_offset_t	phys_start, phys_end;
302 {
303 	vm_offset_t	addr, addr2;
304 	vm_size_t	npg, s;
305 	int		rv;
306 	extern vm_offset_t	DIObase;
307 
308 #ifdef DEBUG
309 	if (pmapdebug & PDB_FOLLOW)
310 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
311 #endif
312 	/*
313 	 * Now that kernel map has been allocated, we can mark as
314 	 * unavailable regions which we have mapped in locore.
315 	 */
316 	addr = DIObase;
317 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
318 			   &addr, hp300_ptob(IOMAPSIZE), FALSE);
319 	if (addr != DIObase)
320 		goto bogons;
321 	addr = (vm_offset_t) Sysmap;
322 	vm_object_reference(kernel_object);
323 	(void) vm_map_find(kernel_map, kernel_object, addr,
324 			   &addr, HP_MAX_PTSIZE, FALSE);
325 	/*
326 	 * If this fails it is probably because the static portion of
327 	 * the kernel page table isn't big enough and we overran the
328 	 * page table map.   Need to adjust pmap_size() in hp300_init.c.
329 	 */
330 	if (addr != (vm_offset_t)Sysmap)
331 		goto bogons;
332 
333 	addr = (vm_offset_t) &u;
334 	vm_object_reference(kernel_object);
335 	(void) vm_map_find(kernel_map, kernel_object, addr,
336 			   &addr, hp300_ptob(UPAGES), FALSE);
337 	if (addr != (vm_offset_t)&u)
338 bogons:
339 		panic("pmap_init: bogons in the VM system!\n");
340 
341 #ifdef DEBUG
342 	if (pmapdebug & PDB_INIT) {
343 		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
344 		       Sysseg, Sysmap, Sysptmap);
345 		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
346 		       avail_start, avail_end, virtual_avail, virtual_end);
347 	}
348 #endif
349 
350 	/*
351 	 * Allocate memory for random pmap data structures.  Includes the
352 	 * initial segment table, pv_head_table and pmap_attributes.
353 	 */
354 	npg = atop(phys_end - phys_start);
355 	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
356 	s = round_page(s);
357 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
358 	Segtabzero = (st_entry_t *) addr;
359 	addr += HP_STSIZE;
360 	pv_table = (pv_entry_t) addr;
361 	addr += sizeof(struct pv_entry) * npg;
362 	pmap_attributes = (char *) addr;
363 #ifdef DEBUG
364 	if (pmapdebug & PDB_INIT)
365 		printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
366 		       s, npg, Segtabzero, pv_table, pmap_attributes);
367 #endif
368 
369 	/*
370 	 * Allocate physical memory for kernel PT pages and their management.
371 	 * We need 1 PT page per possible task plus some slop.
372 	 */
373 	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
374 	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
375 
376 	/*
377 	 * Verify that space will be allocated in region for which
378 	 * we already have kernel PT pages.
379 	 */
380 	addr = 0;
381 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
382 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
383 		panic("pmap_init: kernel PT too small");
384 	vm_map_remove(kernel_map, addr, addr + s);
385 
386 	/*
387 	 * Now allocate the space and link the pages together to
388 	 * form the KPT free list.
389 	 */
390 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
391 	s = ptoa(npg);
392 	addr2 = addr + s;
393 	kpt_pages = &((struct kpt_page *)addr2)[npg];
394 	kpt_free_list = (struct kpt_page *) 0;
395 	do {
396 		addr2 -= HP_PAGE_SIZE;
397 		(--kpt_pages)->kpt_next = kpt_free_list;
398 		kpt_free_list = kpt_pages;
399 		kpt_pages->kpt_va = addr2;
400 		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
401 	} while (addr != addr2);
402 #ifdef DEBUG
403 	kpt_stats.kpttotal = atop(s);
404 	if (pmapdebug & PDB_INIT)
405 		printf("pmap_init: KPT: %d pages from %x to %x\n",
406 		       atop(s), addr, addr + s);
407 #endif
408 
409 	/*
410 	 * Slightly modified version of kmem_suballoc() to get page table
411 	 * map where we want it.
412 	 */
413 	addr = HP_PTBASE;
414 	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
415 	addr2 = addr + s;
416 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
417 	if (rv != KERN_SUCCESS)
418 		panic("pmap_init: cannot allocate space for PT map");
419 	pmap_reference(vm_map_pmap(kernel_map));
420 	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
421 	if (pt_map == NULL)
422 		panic("pmap_init: cannot create pt_map");
423 	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
424 	if (rv != KERN_SUCCESS)
425 		panic("pmap_init: cannot map range to pt_map");
426 #ifdef DEBUG
427 	if (pmapdebug & PDB_INIT)
428 		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
429 #endif
430 
431 	/*
432 	 * Now it is safe to enable pv_table recording.
433 	 */
434 	vm_first_phys = phys_start;
435 	vm_last_phys = phys_end;
436 	pmap_initialized = TRUE;
437 }
438 
439 /*
440  *	Used to map a range of physical addresses into kernel
441  *	virtual address space.
442  *
443  *	For now, VM is already on, we only need to map the
444  *	specified memory.
445  */
446 vm_offset_t
447 pmap_map(virt, start, end, prot)
448 	vm_offset_t	virt;
449 	vm_offset_t	start;
450 	vm_offset_t	end;
451 	int		prot;
452 {
453 #ifdef DEBUG
454 	if (pmapdebug & PDB_FOLLOW)
455 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
456 #endif
457 	while (start < end) {
458 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
459 		virt += PAGE_SIZE;
460 		start += PAGE_SIZE;
461 	}
462 	return(virt);
463 }
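
/*
 * Typical (hypothetical) use, e.g. from bootstrap or autoconfiguration
 * code -- map a physical range into kernel VA and advance a running VA:
 *
 *	va = pmap_map(va, pa_start, pa_end, VM_PROT_READ|VM_PROT_WRITE);
 *
 * pa_start/pa_end are illustrative names only.
 */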
464 
465 /*
466  *	Create and return a physical map.
467  *
468  *	If the size specified for the map
469  *	is zero, the map is an actual physical
470  *	map, and may be referenced by the
471  *	hardware.
472  *
473  *	If the size specified is non-zero,
474  *	the map will be used in software only, and
475  *	is bounded by that size.
476  */
477 pmap_t
478 pmap_create(size)
479 	vm_size_t	size;
480 {
481 	register pmap_t pmap;
482 
483 #ifdef DEBUG
484 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
485 		printf("pmap_create(%x)\n", size);
486 #endif
487 	/*
488 	 * Software use map does not need a pmap
489 	 */
490 	if (size)
491 		return(NULL);
492 
493 	/* XXX: is it ok to wait here? */
494 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
495 #ifdef notifwewait
496 	if (pmap == NULL)
497 		panic("pmap_create: cannot allocate a pmap");
498 #endif
499 	bzero(pmap, sizeof(*pmap));
500 	pmap_pinit(pmap);
501 	return (pmap);
502 }
503 
504 /*
505  * Initialize a preallocated and zeroed pmap structure,
506  * such as one in a vmspace structure.
507  */
508 void
509 pmap_pinit(pmap)
510 	register struct pmap *pmap;
511 {
512 
513 #ifdef DEBUG
514 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
515 		printf("pmap_pinit(%x)\n", pmap);
516 #endif
517 	/*
518 	 * No need to allocate page table space yet but we do need a
519 	 * valid segment table.  Initially, we point everyone at the
520 	 * "null" segment table.  On the first pmap_enter, a real
521 	 * segment table will be allocated.
522 	 */
523 	pmap->pm_stab = Segtabzero;
524 	pmap->pm_stchanged = TRUE;
525 	pmap->pm_count = 1;
526 	simple_lock_init(&pmap->pm_lock);
527 }
528 
529 /*
530  *	Retire the given physical map from service.
531  *	Should only be called if the map contains
532  *	no valid mappings.
533  */
534 void
535 pmap_destroy(pmap)
536 	register pmap_t pmap;
537 {
538 	int count;
539 
540 #ifdef DEBUG
541 	if (pmapdebug & PDB_FOLLOW)
542 		printf("pmap_destroy(%x)\n", pmap);
543 #endif
544 	if (pmap == NULL)
545 		return;
546 
547 	simple_lock(&pmap->pm_lock);
548 	count = --pmap->pm_count;
549 	simple_unlock(&pmap->pm_lock);
550 	if (count == 0) {
551 		pmap_release(pmap);
552 		free((caddr_t)pmap, M_VMPMAP);
553 	}
554 }
555 
556 /*
557  * Release any resources held by the given physical map.
558  * Called when a pmap initialized by pmap_pinit is being released.
559  * Should only be called if the map contains no valid mappings.
560  */
561 void
562 pmap_release(pmap)
563 	register struct pmap *pmap;
564 {
565 
566 #ifdef DEBUG
567 	if (pmapdebug & PDB_FOLLOW)
568 		printf("pmap_release(%x)\n", pmap);
569 #endif
570 #ifdef notdef /* DIAGNOSTIC */
571 	/* count would be 0 from pmap_destroy... */
572 	simple_lock(&pmap->pm_lock);
573 	if (pmap->pm_count != 1)
574 		panic("pmap_release count");
575 #endif
576 	if (pmap->pm_ptab)
577 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
578 				 HP_MAX_PTSIZE);
579 	if (pmap->pm_stab != Segtabzero)
580 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
581 }
582 
583 /*
584  *	Add a reference to the specified pmap.
585  */
586 void
587 pmap_reference(pmap)
588 	pmap_t	pmap;
589 {
590 #ifdef DEBUG
591 	if (pmapdebug & PDB_FOLLOW)
592 		printf("pmap_reference(%x)\n", pmap);
593 #endif
594 	if (pmap != NULL) {
595 		simple_lock(&pmap->pm_lock);
596 		pmap->pm_count++;
597 		simple_unlock(&pmap->pm_lock);
598 	}
599 }
600 
601 /*
602  *	Remove the given range of addresses from the specified map.
603  *
604  *	It is assumed that the start and end are properly
605  *	rounded to the page size.
606  */
607 void
608 pmap_remove(pmap, sva, eva)
609 	register pmap_t pmap;
610 	vm_offset_t sva, eva;
611 {
612 	register vm_offset_t pa, va;
613 	register pt_entry_t *pte;
614 	register pv_entry_t pv, npv;
615 	register int ix;
616 	pmap_t ptpmap;
617 	int *ste, s, bits;
618 	boolean_t firstpage = TRUE;
619 	boolean_t flushcache = FALSE;
620 #ifdef DEBUG
621 	pt_entry_t opte;
622 
623 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
624 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
625 #endif
626 
627 	if (pmap == NULL)
628 		return;
629 
630 #ifdef DEBUG
631 	remove_stats.calls++;
632 #endif
633 	for (va = sva; va < eva; va += PAGE_SIZE) {
634 		/*
635 		 * Weed out invalid mappings.
636 		 * Note: we assume that the segment table is always allocated.
637 		 */
638 		if (!pmap_ste_v(pmap_ste(pmap, va))) {
639 			/* XXX: avoid address wrap around */
640 			if (va >= hp300_trunc_seg((vm_offset_t)-1))
641 				break;
642 			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
643 			continue;
644 		}
645 		pte = pmap_pte(pmap, va);
646 		pa = pmap_pte_pa(pte);
647 		if (pa == 0)
648 			continue;
649 		/*
650 		 * Invalidating a non-CI page, must flush external VAC
651 		 * unless it is a supervisor mapping and we have already
652 		 * flushed the supervisor side.
653 		 */
654 		if (pmap_aliasmask && !pmap_pte_ci(pte) &&
655 		    !(pmap == kernel_pmap && firstpage))
656 			flushcache = TRUE;
657 #ifdef DEBUG
658 		opte = *pte;
659 		remove_stats.removes++;
660 #endif
661 		/*
662 		 * Update statistics
663 		 */
664 		if (pmap_pte_w(pte))
665 			pmap->pm_stats.wired_count--;
666 		pmap->pm_stats.resident_count--;
667 
668 		/*
669 		 * Invalidate the PTEs.
670 		 * XXX: should cluster them up and invalidate as many
671 		 * as possible at once.
672 		 */
673 #ifdef DEBUG
674 		if (pmapdebug & PDB_REMOVE)
675 			printf("remove: invalidating %x ptes at %x\n",
676 			       hppagesperpage, pte);
677 #endif
678 		/*
679 		 * Flush VAC to ensure we get the correct state of any
680 		 * hardware maintained bits.
681 		 */
682 		if (firstpage && pmap_aliasmask) {
683 			firstpage = FALSE;
684 			if (pmap == kernel_pmap)
685 				flushcache = FALSE;
686 			DCIS();
687 #ifdef DEBUG
688 			remove_stats.sflushes++;
689 #endif
690 		}
691 		bits = ix = 0;
692 		do {
693 			bits |= *(int *)pte & (PG_U|PG_M);
694 			*(int *)pte++ = PG_NV;
695 			TBIS(va + ix * HP_PAGE_SIZE);
696 		} while (++ix != hppagesperpage);
697 
698 		/*
699 		 * For user mappings decrement the wiring count on
700 		 * the PT page.  We do this after the PTE has been
701 		 * invalidated because vm_map_pageable winds up in
702 		 * pmap_pageable which clears the modify bit for the
703 		 * PT page.
704 		 */
705 		if (pmap != kernel_pmap) {
706 			pte = pmap_pte(pmap, va);
707 			vm_map_pageable(pt_map, trunc_page(pte),
708 					round_page(pte+1), TRUE);
709 #ifdef DEBUG
710 			if (pmapdebug & PDB_WIRING)
711 				pmap_check_wiring("remove", trunc_page(pte));
712 #endif
713 		}
714 		/*
715 		 * Remove from the PV table (raise IPL since we
716 		 * may be called at interrupt time).
717 		 */
718 		if (pa < vm_first_phys || pa >= vm_last_phys)
719 			continue;
720 		pv = pa_to_pvh(pa);
721 		ste = (int *)0;
722 		s = splimp();
723 		/*
724 		 * If it is the first entry on the list, it is actually
725 		 * in the header and we must copy the following entry up
726 		 * to the header.  Otherwise we must search the list for
727 		 * the entry.  In either case we free the now unused entry.
728 		 */
729 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
730 			ste = (int *)pv->pv_ptste;
731 			ptpmap = pv->pv_ptpmap;
732 			npv = pv->pv_next;
733 			if (npv) {
734 				*pv = *npv;
735 				free((caddr_t)npv, M_VMPVENT);
736 			} else
737 				pv->pv_pmap = NULL;
738 #ifdef DEBUG
739 			remove_stats.pvfirst++;
740 #endif
741 		} else {
742 			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
743 #ifdef DEBUG
744 				remove_stats.pvsearch++;
745 #endif
746 				if (pmap == npv->pv_pmap && va == npv->pv_va)
747 					break;
748 				pv = npv;
749 			}
750 #ifdef DEBUG
751 			if (npv == NULL)
752 				panic("pmap_remove: PA not in pv_tab");
753 #endif
754 			ste = (int *)npv->pv_ptste;
755 			ptpmap = npv->pv_ptpmap;
756 			pv->pv_next = npv->pv_next;
757 			free((caddr_t)npv, M_VMPVENT);
758 			pv = pa_to_pvh(pa);
759 		}
760 		/*
761 		 * If only one mapping left we no longer need to cache inhibit
762 		 */
763 		if (pv->pv_pmap &&
764 		    pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
765 #ifdef DEBUG
766 			if (pmapdebug & PDB_CACHE)
767 				printf("remove: clearing CI for pa %x\n", pa);
768 #endif
769 			pv->pv_flags &= ~PV_CI;
770 			pmap_changebit(pa, PG_CI, FALSE);
771 #ifdef DEBUG
772 			if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
773 			    (PDB_CACHE|PDB_PVDUMP))
774 				pmap_pvdump(pa);
775 #endif
776 		}
777 
778 		/*
779 		 * If this was a PT page we must also remove the
780 		 * mapping from the associated segment table.
781 		 */
782 		if (ste) {
783 #ifdef DEBUG
784 			remove_stats.ptinvalid++;
785 			if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
786 				printf("remove: ste was %x@%x pte was %x@%x\n",
787 				       *ste, ste,
788 				       *(int *)&opte, pmap_pte(pmap, va));
789 			}
790 #endif
791 			*ste = SG_NV;
792 			/*
793 			 * If it was a user PT page, we decrement the
794 			 * reference count on the segment table as well,
795 			 * freeing it if it is now empty.
796 			 */
797 			if (ptpmap != kernel_pmap) {
798 #ifdef DEBUG
799 				if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
800 					printf("remove: stab %x, refcnt %d\n",
801 					       ptpmap->pm_stab,
802 					       ptpmap->pm_sref - 1);
803 				if ((pmapdebug & PDB_PARANOIA) &&
804 				    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
805 					panic("remove: bogus ste");
806 #endif
807 				if (--(ptpmap->pm_sref) == 0) {
808 #ifdef DEBUG
809 					if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
810 					printf("remove: free stab %x\n",
811 					       ptpmap->pm_stab);
812 #endif
813 					kmem_free(kernel_map,
814 						  (vm_offset_t)ptpmap->pm_stab,
815 						  HP_STSIZE);
816 					ptpmap->pm_stab = Segtabzero;
817 					ptpmap->pm_stchanged = TRUE;
818 					/*
819 					 * XXX may have changed segment table
820 					 * pointer for current process so
821 					 * update now to reload hardware.
822 					 */
823 					if (ptpmap == curproc->p_vmspace->vm_map.pmap)
824 						PMAP_ACTIVATE(ptpmap,
825 							(struct pcb *)curproc->p_addr, 1);
826 				}
827 			}
828 			if (ptpmap == kernel_pmap)
829 				TBIAS();
830 			else
831 				TBIAU();
832 			pv->pv_flags &= ~PV_PTPAGE;
833 			ptpmap->pm_ptpages--;
834 		}
835 		/*
836 		 * Update saved attributes for managed page
837 		 */
838 		pmap_attributes[pa_index(pa)] |= bits;
839 		splx(s);
840 	}
841 #ifdef DEBUG
842 	if (pmapvacflush & PVF_REMOVE) {
843 		if (pmapvacflush & PVF_TOTAL)
844 			DCIA();
845 		else if (pmap == kernel_pmap)
846 			DCIS();
847 		else
848 			DCIU();
849 	}
850 #endif
851 	if (flushcache) {
852 		if (pmap == kernel_pmap) {
853 			DCIS();
854 #ifdef DEBUG
855 			remove_stats.sflushes++;
856 #endif
857 		} else {
858 			DCIU();
859 #ifdef DEBUG
860 			remove_stats.uflushes++;
861 #endif
862 		}
863 	}
864 }
865 
866 /*
867  *	Routine:	pmap_remove_all
868  *	Function:
869  *		Removes this physical page from
870  *		all physical maps in which it resides.
871  *		Reflects back modify bits to the pager.
872  */
873 void
874 pmap_remove_all(pa)
875 	vm_offset_t pa;
876 {
877 	register pv_entry_t pv;
878 	int s;
879 
880 #ifdef DEBUG
881 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
882 		printf("pmap_remove_all(%x)\n", pa);
883 #endif
884 	/*
885 	 * Not one of ours
886 	 */
887 	if (pa < vm_first_phys || pa >= vm_last_phys)
888 		return;
889 
890 	pv = pa_to_pvh(pa);
891 	s = splimp();
892 	/*
893 	 * Do it the easy way for now
894 	 */
895 	while (pv->pv_pmap != NULL) {
896 #ifdef DEBUG
897 		if (!pmap_ste_v(pmap_ste(pv->pv_pmap, pv->pv_va)) ||
898 		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
899 			panic("pmap_remove_all: bad mapping");
900 #endif
901 		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
902 	}
903 	splx(s);
904 }
905 
906 /*
907  *	Routine:	pmap_copy_on_write
908  *	Function:
909  *		Remove write privileges from all
910  *		physical maps for this physical page.
911  */
912 void
913 pmap_copy_on_write(pa)
914 	vm_offset_t pa;
915 {
916 #ifdef DEBUG
917 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
918 		printf("pmap_copy_on_write(%x)\n", pa);
919 #endif
920 	pmap_changebit(pa, PG_RO, TRUE);
921 }
922 
923 /*
924  *	Set the physical protection on the
925  *	specified range of this map as requested.
926  */
927 void
928 pmap_protect(pmap, sva, eva, prot)
929 	register pmap_t	pmap;
930 	vm_offset_t	sva, eva;
931 	vm_prot_t	prot;
932 {
933 	register pt_entry_t *pte;
934 	register vm_offset_t va;
935 	register int ix;
936 	int hpprot;
937 	boolean_t firstpage = TRUE;
938 
939 #ifdef DEBUG
940 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
941 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
942 #endif
943 	if (pmap == NULL)
944 		return;
945 
946 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
947 		pmap_remove(pmap, sva, eva);
948 		return;
949 	}
950 	if (prot & VM_PROT_WRITE)
951 		return;
952 
953 	pte = pmap_pte(pmap, sva);
954 	hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
955 	for (va = sva; va < eva; va += PAGE_SIZE) {
956 		/*
957 		 * Page table page is not allocated.
958 		 * Skip it, we don't want to force allocation
959 		 * of unnecessary PTE pages just to set the protection.
960 		 */
961 		if (!pmap_ste_v(pmap_ste(pmap, va))) {
962 			/* XXX: avoid address wrap around */
963 			if (va >= hp300_trunc_seg((vm_offset_t)-1))
964 				break;
965 			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
966 			pte = pmap_pte(pmap, va);
967 			pte += hppagesperpage;
968 			continue;
969 		}
970 		/*
971 		 * Page not valid.  Again, skip it.
972 		 * Should we do this?  Or set protection anyway?
973 		 */
974 		if (!pmap_pte_v(pte)) {
975 			pte += hppagesperpage;
976 			continue;
977 		}
978 		/*
979 		 * Flush VAC to ensure we get correct state of HW bits
980 		 * so we don't clobber them.
981 		 */
982 		if (firstpage && pmap_aliasmask) {
983 			firstpage = FALSE;
984 			DCIS();
985 		}
986 		ix = 0;
987 		do {
988 			/* clear VAC here if PG_RO? */
989 			pmap_pte_set_prot(pte++, hpprot);
990 			TBIS(va + ix * HP_PAGE_SIZE);
991 		} while (++ix != hppagesperpage);
992 	}
993 #ifdef DEBUG
994 	if (hpprot && (pmapvacflush & PVF_PROTECT)) {
995 		if (pmapvacflush & PVF_TOTAL)
996 			DCIA();
997 		else if (pmap == kernel_pmap)
998 			DCIS();
999 		else
1000 			DCIU();
1001 	}
1002 #endif
1003 }
1004 
1005 /*
1006  *	Insert the given physical page (p) at
1007  *	the specified virtual address (v) in the
1008  *	target physical map with the protection requested.
1009  *
1010  *	If specified, the page will be wired down, meaning
1011  *	that the related pte can not be reclaimed.
1012  *
1013  *	NB:  This is the only routine which MAY NOT lazy-evaluate
1014  *	or lose information.  That is, this routine must actually
1015  *	insert this page into the given map NOW.
1016  */
1017 void
1018 pmap_enter(pmap, va, pa, prot, wired)
1019 	register pmap_t pmap;
1020 	vm_offset_t va;
1021 	register vm_offset_t pa;
1022 	vm_prot_t prot;
1023 	boolean_t wired;
1024 {
1025 	register pt_entry_t *pte;
1026 	register int npte, ix;
1027 	vm_offset_t opa;
1028 	boolean_t cacheable = TRUE;
1029 	boolean_t checkpv = TRUE;
1030 
1031 #ifdef DEBUG
1032 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
1033 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
1034 		       pmap, va, pa, prot, wired);
1035 #endif
1036 	if (pmap == NULL)
1037 		return;
1038 
1039 #ifdef DEBUG
1040 	if (pmap == kernel_pmap)
1041 		enter_stats.kernel++;
1042 	else
1043 		enter_stats.user++;
1044 #endif
1045 	/*
1046 	 * For user mapping, allocate kernel VM resources if necessary.
1047 	 */
1048 	if (pmap->pm_ptab == NULL)
1049 		pmap->pm_ptab = (pt_entry_t *)
1050 			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
1051 
1052 	/*
1053 	 * Segment table entry not valid, we need a new PT page
1054 	 */
1055 	if (!pmap_ste_v(pmap_ste(pmap, va)))
1056 		pmap_enter_ptpage(pmap, va);
1057 
1058 	pte = pmap_pte(pmap, va);
1059 	opa = pmap_pte_pa(pte);
1060 #ifdef DEBUG
1061 	if (pmapdebug & PDB_ENTER)
1062 		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
1063 #endif
1064 
1065 	/*
1066 	 * Mapping has not changed, must be protection or wiring change.
1067 	 */
1068 	if (opa == pa) {
1069 #ifdef DEBUG
1070 		enter_stats.pwchange++;
1071 #endif
1072 		/*
1073 		 * Wiring change, just update stats.
1074 		 * We don't worry about wiring PT pages as they remain
1075 		 * resident as long as there are valid mappings in them.
1076 		 * Hence, if a user page is wired, the PT page will be also.
1077 		 */
1078 		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1079 #ifdef DEBUG
1080 			if (pmapdebug & PDB_ENTER)
1081 				printf("enter: wiring change -> %x\n", wired);
1082 #endif
1083 			if (wired)
1084 				pmap->pm_stats.wired_count++;
1085 			else
1086 				pmap->pm_stats.wired_count--;
1087 #ifdef DEBUG
1088 			enter_stats.wchange++;
1089 #endif
1090 		}
1091 		/*
1092 		 * Retain cache inhibition status
1093 		 */
1094 		checkpv = FALSE;
1095 		if (pmap_pte_ci(pte))
1096 			cacheable = FALSE;
1097 		goto validate;
1098 	}
1099 
1100 	/*
1101 	 * Mapping has changed, invalidate old range and fall through to
1102 	 * handle validating new mapping.
1103 	 */
1104 	if (opa) {
1105 #ifdef DEBUG
1106 		if (pmapdebug & PDB_ENTER)
1107 			printf("enter: removing old mapping %x\n", va);
1108 #endif
1109 		pmap_remove(pmap, va, va + PAGE_SIZE);
1110 #ifdef DEBUG
1111 		enter_stats.mchange++;
1112 #endif
1113 	}
1114 
1115 	/*
1116 	 * If this is a new user mapping, increment the wiring count
1117 	 * on this PT page.  PT pages are wired down as long as there
1118 	 * is a valid mapping in the page.
1119 	 */
1120 	if (pmap != kernel_pmap)
1121 		vm_map_pageable(pt_map, trunc_page(pte),
1122 				round_page(pte+1), FALSE);
1123 
1124 	/*
1125 	 * Enter on the PV list if part of our managed memory
1126 	 * Note that we raise IPL while manipulating pv_table
1127 	 * since pmap_enter can be called at interrupt time.
1128 	 */
1129 	if (pa >= vm_first_phys && pa < vm_last_phys) {
1130 		register pv_entry_t pv, npv;
1131 		int s;
1132 
1133 #ifdef DEBUG
1134 		enter_stats.managed++;
1135 #endif
1136 		pv = pa_to_pvh(pa);
1137 		s = splimp();
1138 #ifdef DEBUG
1139 		if (pmapdebug & PDB_ENTER)
1140 			printf("enter: pv at %x: %x/%x/%x\n",
1141 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1142 #endif
1143 		/*
1144 		 * No entries yet, use header as the first entry
1145 		 */
1146 		if (pv->pv_pmap == NULL) {
1147 #ifdef DEBUG
1148 			enter_stats.firstpv++;
1149 #endif
1150 			pv->pv_va = va;
1151 			pv->pv_pmap = pmap;
1152 			pv->pv_next = NULL;
1153 			pv->pv_ptste = NULL;
1154 			pv->pv_ptpmap = NULL;
1155 			pv->pv_flags = 0;
1156 		}
1157 		/*
1158 		 * There is at least one other VA mapping this page.
1159 		 * Place this entry after the header.
1160 		 */
1161 		else {
1162 #ifdef DEBUG
1163 			for (npv = pv; npv; npv = npv->pv_next)
1164 				if (pmap == npv->pv_pmap && va == npv->pv_va)
1165 					panic("pmap_enter: already in pv_tab");
1166 #endif
1167 			npv = (pv_entry_t)
1168 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
1169 			npv->pv_va = va;
1170 			npv->pv_pmap = pmap;
1171 			npv->pv_next = pv->pv_next;
1172 			npv->pv_ptste = NULL;
1173 			npv->pv_ptpmap = NULL;
1174 			pv->pv_next = npv;
1175 #ifdef DEBUG
1176 			if (!npv->pv_next)
1177 				enter_stats.secondpv++;
1178 #endif
1179 			/*
1180 			 * Since there is another logical mapping for the
1181 			 * same page we may need to cache-inhibit the
1182 			 * descriptors on those CPUs with external VACs.
1183 			 * We don't need to CI if:
1184 			 *
1185 			 * - No two mappings belong to the same user pmap.
1186 			 *   Since the cache is flushed on context switches
1187 			 *   there is no problem between user processes.
1188 			 *
1189 			 * - Mappings within a single pmap are a certain
1190 			 *   magic distance apart.  VAs at these appropriate
1191 			 *   boundaries map to the same cache entries or
1192 			 *   otherwise don't conflict.
1193 			 *
1194 			 * To keep it simple, we only check for these special
1195 			 * cases if there are only two mappings, otherwise we
1196 			 * punt and always CI.
1197 			 *
1198 			 * Note that there are no aliasing problems with the
1199 			 * on-chip data-cache when the WA bit is set.
1200 			 */
1201 			if (pmap_aliasmask) {
1202 				if (pv->pv_flags & PV_CI) {
1203 #ifdef DEBUG
1204 					if (pmapdebug & PDB_CACHE)
1205 					printf("enter: pa %x already CI'ed\n",
1206 					       pa);
1207 #endif
1208 					checkpv = cacheable = FALSE;
1209 				} else if (npv->pv_next ||
1210 					   ((pmap == pv->pv_pmap ||
1211 					     pmap == kernel_pmap ||
1212 					     pv->pv_pmap == kernel_pmap) &&
1213 					    ((pv->pv_va & pmap_aliasmask) !=
1214 					     (va & pmap_aliasmask)))) {
1215 #ifdef DEBUG
1216 					if (pmapdebug & PDB_CACHE)
1217 					printf("enter: pa %x CI'ing all\n",
1218 					       pa);
1219 #endif
1220 					cacheable = FALSE;
1221 					pv->pv_flags |= PV_CI;
1222 #ifdef DEBUG
1223 					enter_stats.ci++;
1224 #endif
1225 				}
1226 			}
1227 		}
1228 		splx(s);
1229 	}
1230 	/*
1231 	 * Assumption: if it is not part of our managed memory
1232 	 * then it must be device memory which may be volatile.
1233 	 */
1234 	else if (pmap_initialized) {
1235 		checkpv = cacheable = FALSE;
1236 #ifdef DEBUG
1237 		enter_stats.unmanaged++;
1238 #endif
1239 	}
1240 
1241 	/*
1242 	 * Increment counters
1243 	 */
1244 	pmap->pm_stats.resident_count++;
1245 	if (wired)
1246 		pmap->pm_stats.wired_count++;
1247 
1248 validate:
1249 	/*
1250 	 * Flush VAC to ensure we get correct state of HW bits
1251 	 * so we don't clobber them.
1252 	 */
1253 	if (pmap_aliasmask)
1254 		DCIS();
1255 	/*
1256 	 * Now validate mapping with desired protection/wiring.
1257 	 * Assume uniform modified and referenced status for all
1258 	 * HP pages in a MACH page.
1259 	 */
1260 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1261 	npte |= (*(int *)pte & (PG_M|PG_U));
1262 	if (wired)
1263 		npte |= PG_W;
1264 	if (!checkpv && !cacheable)
1265 		npte |= PG_CI;
1266 #ifdef DEBUG
1267 	if (pmapdebug & PDB_ENTER)
1268 		printf("enter: new pte value %x\n", npte);
1269 #endif
1270 	ix = 0;
1271 	do {
1272 		*(int *)pte++ = npte;
1273 		TBIS(va);
1274 		npte += HP_PAGE_SIZE;
1275 		va += HP_PAGE_SIZE;
1276 	} while (++ix != hppagesperpage);
1277 	/*
1278 	 * The following is executed if we are entering a second
1279 	 * (or greater) mapping for a physical page and the mappings
1280 	 * may create an aliasing problem.  In this case we must
1281 	 * cache inhibit the descriptors involved and flush any
1282 	 * external VAC.
1283 	 */
1284 	if (checkpv && !cacheable) {
1285 		pmap_changebit(pa, PG_CI, TRUE);
1286 		DCIA();
1287 #ifdef DEBUG
1288 		enter_stats.flushes++;
1289 #endif
1290 #ifdef DEBUG
1291 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1292 		    (PDB_CACHE|PDB_PVDUMP))
1293 			pmap_pvdump(pa);
1294 #endif
1295 	}
1296 #ifdef DEBUG
1297 	else if (pmapvacflush & PVF_ENTER) {
1298 		if (pmapvacflush & PVF_TOTAL)
1299 			DCIA();
1300 		else if (pmap == kernel_pmap)
1301 			DCIS();
1302 		else
1303 			DCIU();
1304 	}
1305 	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
1306 		va -= PAGE_SIZE;
1307 		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
1308 	}
1309 #endif
1310 }
1311 
1312 /*
1313  *	Routine:	pmap_change_wiring
1314  *	Function:	Change the wiring attribute for a map/virtual-address
1315  *			pair.
1316  *	In/out conditions:
1317  *			The mapping must already exist in the pmap.
1318  */
1319 void
1320 pmap_change_wiring(pmap, va, wired)
1321 	register pmap_t	pmap;
1322 	vm_offset_t	va;
1323 	boolean_t	wired;
1324 {
1325 	register pt_entry_t *pte;
1326 	register int ix;
1327 
1328 #ifdef DEBUG
1329 	if (pmapdebug & PDB_FOLLOW)
1330 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1331 #endif
1332 	if (pmap == NULL)
1333 		return;
1334 
1335 	pte = pmap_pte(pmap, va);
1336 #ifdef DEBUG
1337 	/*
1338 	 * Page table page is not allocated.
1339 	 * Should this ever happen?  Ignore it for now,
1340 	 * we don't want to force allocation of unnecessary PTE pages.
1341 	 */
1342 	if (!pmap_ste_v(pmap_ste(pmap, va))) {
1343 		if (pmapdebug & PDB_PARANOIA)
1344 			printf("pmap_change_wiring: invalid STE for %x\n", va);
1345 		return;
1346 	}
1347 	/*
1348 	 * Page not valid.  Should this ever happen?
1349 	 * Just continue and change wiring anyway.
1350 	 */
1351 	if (!pmap_pte_v(pte)) {
1352 		if (pmapdebug & PDB_PARANOIA)
1353 			printf("pmap_change_wiring: invalid PTE for %x\n", va);
1354 	}
1355 #endif
1356 	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
1357 		if (wired)
1358 			pmap->pm_stats.wired_count++;
1359 		else
1360 			pmap->pm_stats.wired_count--;
1361 	}
1362 	/*
1363 	 * Wiring is not a hardware characteristic so there is no need
1364 	 * to invalidate TLB.
1365 	 */
1366 	ix = 0;
1367 	do {
1368 		pmap_pte_set_w(pte++, wired);
1369 	} while (++ix != hppagesperpage);
1370 }
1371 
1372 /*
1373  *	Routine:	pmap_extract
1374  *	Function:
1375  *		Extract the physical page address associated
1376  *		with the given map/virtual_address pair.
1377  */
1378 
1379 vm_offset_t
1380 pmap_extract(pmap, va)
1381 	register pmap_t	pmap;
1382 	vm_offset_t va;
1383 {
1384 	register vm_offset_t pa;
1385 
1386 #ifdef DEBUG
1387 	if (pmapdebug & PDB_FOLLOW)
1388 		printf("pmap_extract(%x, %x) -> ", pmap, va);
1389 #endif
1390 	pa = 0;
1391 	if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
1392 		pa = *(int *)pmap_pte(pmap, va);
1393 	if (pa)
1394 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1395 #ifdef DEBUG
1396 	if (pmapdebug & PDB_FOLLOW)
1397 		printf("%x\n", pa);
1398 #endif
1399 	return(pa);
1400 }
1401 
1402 /*
1403  *	Copy the range specified by src_addr/len
1404  *	from the source map to the range dst_addr/len
1405  *	in the destination map.
1406  *
1407  *	This routine is only advisory and need not do anything.
1408  */
1409 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1410 	pmap_t		dst_pmap;
1411 	pmap_t		src_pmap;
1412 	vm_offset_t	dst_addr;
1413 	vm_size_t	len;
1414 	vm_offset_t	src_addr;
1415 {
1416 #ifdef DEBUG
1417 	if (pmapdebug & PDB_FOLLOW)
1418 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1419 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1420 #endif
1421 }
1422 
1423 /*
1424  *	Require that all active physical maps contain no
1425  *	incorrect entries NOW.  [This update includes
1426  *	forcing updates of any address map caching.]
1427  *
1428  *	Generally used to ensure that a thread about
1429  *	to run will see a semantically correct world.
1430  */
1431 void pmap_update()
1432 {
1433 #ifdef DEBUG
1434 	if (pmapdebug & PDB_FOLLOW)
1435 		printf("pmap_update()\n");
1436 #endif
1437 	TBIA();
1438 }
1439 
1440 /*
1441  *	Routine:	pmap_collect
1442  *	Function:
1443  *		Garbage collects the physical map system for
1444  *		pages which are no longer used.
1445  *		Success need not be guaranteed -- that is, there
1446  *		may well be pages which are not referenced, but
1447  *		others may be collected.
1448  *	Usage:
1449  *		Called by the pageout daemon when pages are scarce.
1450  */
1451 void
1452 pmap_collect(pmap)
1453 	pmap_t		pmap;
1454 {
1455 	register vm_offset_t pa;
1456 	register pv_entry_t pv;
1457 	register int *pte;
1458 	vm_offset_t kpa;
1459 	int s;
1460 
1461 #ifdef DEBUG
1462 	int *ste;
1463 	int opmapdebug;
1464 #endif
1465 	if (pmap != kernel_pmap)
1466 		return;
1467 
1468 #ifdef DEBUG
1469 	if (pmapdebug & PDB_FOLLOW)
1470 		printf("pmap_collect(%x)\n", pmap);
1471 	kpt_stats.collectscans++;
1472 #endif
1473 	s = splimp();
1474 	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
1475 		register struct kpt_page *kpt, **pkpt;
1476 
1477 		/*
1478 		 * Locate physical pages which are being used as kernel
1479 		 * page table pages.
1480 		 */
1481 		pv = pa_to_pvh(pa);
1482 		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
1483 			continue;
1484 		do {
1485 			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
1486 				break;
1487 		} while (pv = pv->pv_next);
1488 		if (pv == NULL)
1489 			continue;
1490 #ifdef DEBUG
1491 		if (pv->pv_va < (vm_offset_t)Sysmap ||
1492 		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
1493 			printf("collect: kernel PT VA out of range\n");
1494 		else
1495 			goto ok;
1496 		pmap_pvdump(pa);
1497 		continue;
1498 ok:
1499 #endif
1500 		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
1501 		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
1502 			;
1503 		if (pte >= (int *)pv->pv_va)
1504 			continue;
1505 
1506 #ifdef DEBUG
1507 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1508 			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
1509 			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
1510 			opmapdebug = pmapdebug;
1511 			pmapdebug |= PDB_PTPAGE;
1512 		}
1513 
1514 		ste = (int *)pv->pv_ptste;
1515 #endif
1516 		/*
1517 		 * If all entries were invalid we can remove the page.
1518 		 * We call pmap_remove to take care of invalidating ST
1519 		 * and Sysptmap entries.
1520 		 */
1521 		kpa = pmap_extract(pmap, pv->pv_va);
1522 		pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
1523 		/*
1524 		 * Use the physical address to locate the original
1525 		 * (kmem_alloc assigned) address for the page and put
1526 		 * that page back on the free list.
1527 		 */
1528 		for (pkpt = &kpt_used_list, kpt = *pkpt;
1529 		     kpt != (struct kpt_page *)0;
1530 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
1531 			if (kpt->kpt_pa == kpa)
1532 				break;
1533 #ifdef DEBUG
1534 		if (kpt == (struct kpt_page *)0)
1535 			panic("pmap_collect: lost a KPT page");
1536 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1537 			printf("collect: %x (%x) to free list\n",
1538 			       kpt->kpt_va, kpa);
1539 #endif
1540 		*pkpt = kpt->kpt_next;
1541 		kpt->kpt_next = kpt_free_list;
1542 		kpt_free_list = kpt;
1543 #ifdef DEBUG
1544 		kpt_stats.kptinuse--;
1545 		kpt_stats.collectpages++;
1546 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1547 			pmapdebug = opmapdebug;
1548 
1549 		if (*ste)
1550 			printf("collect: kernel STE at %x still valid (%x)\n",
1551 			       ste, *ste);
1552 		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
1553 		if (*ste)
1554 			printf("collect: kernel PTmap at %x still valid (%x)\n",
1555 			       ste, *ste);
1556 #endif
1557 	}
1558 	splx(s);
1559 }
1560 
1561 void
1562 pmap_activate(pmap, pcbp)
1563 	register pmap_t pmap;
1564 	struct pcb *pcbp;
1565 {
1566 #ifdef DEBUG
1567 	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
1568 		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
1569 #endif
1570 	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
1571 }
1572 
1573 /*
1574  *	Routine:	pmap_kernel
1575  *	Function:
1576  *		Returns the physical map handle for the kernel.
1577  */
1578 pmap_t
1579 pmap_kernel()
1580 {
1581 	return (kernel_pmap);
1582 }
1583 
1584 /*
1585  *	pmap_zero_page zeros the specified (machine independent)
1586  *	page by mapping the page into virtual memory and using
1587  *	bzero to clear its contents, one machine dependent page
1588  *	at a time.
1589  */
1590 pmap_zero_page(phys)
1591 	register vm_offset_t	phys;
1592 {
1593 	register int ix;
1594 
1595 #ifdef DEBUG
1596 	if (pmapdebug & PDB_FOLLOW)
1597 		printf("pmap_zero_page(%x)\n", phys);
1598 #endif
1599 	phys >>= PG_SHIFT;
1600 	ix = 0;
1601 	do {
1602 		clearseg(phys++);
1603 	} while (++ix != hppagesperpage);
1604 }
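
/*
 * Worked relation for the loop above (and the analogous one in
 * pmap_copy_page below): hppagesperpage == PAGE_SIZE / HP_PAGE_SIZE, so
 * one machine independent page is processed as that many hardware pages,
 * and phys >> PG_SHIFT is the hardware frame number handed to clearseg().
 * On configurations where the two page sizes are equal the loop body
 * runs exactly once.
 */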
1605 
1606 /*
1607  *	pmap_copy_page copies the specified (machine independent)
1608  *	page by mapping the page into virtual memory and using
1609  *	bcopy to copy the page, one machine dependent page at a
1610  *	time.
1611  */
1612 pmap_copy_page(src, dst)
1613 	register vm_offset_t	src, dst;
1614 {
1615 	register int ix;
1616 
1617 #ifdef DEBUG
1618 	if (pmapdebug & PDB_FOLLOW)
1619 		printf("pmap_copy_page(%x, %x)\n", src, dst);
1620 #endif
1621 	src >>= PG_SHIFT;
1622 	dst >>= PG_SHIFT;
1623 	ix = 0;
1624 	do {
1625 		physcopyseg(src++, dst++);
1626 	} while (++ix != hppagesperpage);
1627 }
1628 
1629 
1630 /*
1631  *	Routine:	pmap_pageable
1632  *	Function:
1633  *		Make the specified pages (by pmap, offset)
1634  *		pageable (or not) as requested.
1635  *
1636  *		A page which is not pageable may not take
1637  *		a fault; therefore, its page table entry
1638  *		must remain valid for the duration.
1639  *
1640  *		This routine is merely advisory; pmap_enter
1641  *		will specify that these pages are to be wired
1642  *		down (or not) as appropriate.
1643  */
1644 pmap_pageable(pmap, sva, eva, pageable)
1645 	pmap_t		pmap;
1646 	vm_offset_t	sva, eva;
1647 	boolean_t	pageable;
1648 {
1649 #ifdef DEBUG
1650 	if (pmapdebug & PDB_FOLLOW)
1651 		printf("pmap_pageable(%x, %x, %x, %x)\n",
1652 		       pmap, sva, eva, pageable);
1653 #endif
1654 	/*
1655 	 * If we are making a PT page pageable then all valid
1656 	 * mappings must be gone from that page.  Hence it should
1657 	 * be all zeros and there is no need to clean it.
1658 	 * Assumptions:
1659 	 *	- we are called with only one page at a time
1660 	 *	- PT pages have only one pv_table entry
1661 	 */
1662 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1663 		register pv_entry_t pv;
1664 		register vm_offset_t pa;
1665 
1666 #ifdef DEBUG
1667 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1668 			printf("pmap_pageable(%x, %x, %x, %x)\n",
1669 			       pmap, sva, eva, pageable);
1670 #endif
1671 		if (!pmap_ste_v(pmap_ste(pmap, sva)))
1672 			return;
1673 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1674 		if (pa < vm_first_phys || pa >= vm_last_phys)
1675 			return;
1676 		pv = pa_to_pvh(pa);
1677 		if (pv->pv_ptste == NULL)
1678 			return;
1679 #ifdef DEBUG
1680 		if (pv->pv_va != sva || pv->pv_next) {
1681 			printf("pmap_pageable: bad PT page va %x next %x\n",
1682 			       pv->pv_va, pv->pv_next);
1683 			return;
1684 		}
1685 #endif
1686 		/*
1687 		 * Mark it unmodified to avoid pageout
1688 		 */
1689 		pmap_changebit(pa, PG_M, FALSE);
1690 #ifdef DEBUG
1691 		if (pmapdebug & PDB_PTPAGE)
1692 			printf("pmap_pageable: PT page %x(%x) unmodified\n",
1693 			       sva, *(int *)pmap_pte(pmap, sva));
1694 		if (pmapdebug & PDB_WIRING)
1695 			pmap_check_wiring("pageable", sva);
1696 #endif
1697 	}
1698 }
1699 
1700 /*
1701  *	Clear the modify bits on the specified physical page.
1702  */
1703 
1704 void
1705 pmap_clear_modify(pa)
1706 	vm_offset_t	pa;
1707 {
1708 #ifdef DEBUG
1709 	if (pmapdebug & PDB_FOLLOW)
1710 		printf("pmap_clear_modify(%x)\n", pa);
1711 #endif
1712 	pmap_changebit(pa, PG_M, FALSE);
1713 }
1714 
1715 /*
1716  *	pmap_clear_reference:
1717  *
1718  *	Clear the reference bit on the specified physical page.
1719  */
1720 
1721 void pmap_clear_reference(pa)
1722 	vm_offset_t	pa;
1723 {
1724 #ifdef DEBUG
1725 	if (pmapdebug & PDB_FOLLOW)
1726 		printf("pmap_clear_reference(%x)\n", pa);
1727 #endif
1728 	pmap_changebit(pa, PG_U, FALSE);
1729 }
1730 
1731 /*
1732  *	pmap_is_referenced:
1733  *
1734  *	Return whether or not the specified physical page is referenced
1735  *	by any physical maps.
1736  */
1737 
1738 boolean_t
1739 pmap_is_referenced(pa)
1740 	vm_offset_t	pa;
1741 {
1742 #ifdef DEBUG
1743 	if (pmapdebug & PDB_FOLLOW) {
1744 		boolean_t rv = pmap_testbit(pa, PG_U);
1745 		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
1746 		return(rv);
1747 	}
1748 #endif
1749 	return(pmap_testbit(pa, PG_U));
1750 }
1751 
1752 /*
1753  *	pmap_is_modified:
1754  *
1755  *	Return whether or not the specified physical page is modified
1756  *	by any physical maps.
1757  */
1758 
1759 boolean_t
1760 pmap_is_modified(pa)
1761 	vm_offset_t	pa;
1762 {
1763 #ifdef DEBUG
1764 	if (pmapdebug & PDB_FOLLOW) {
1765 		boolean_t rv = pmap_testbit(pa, PG_M);
1766 		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
1767 		return(rv);
1768 	}
1769 #endif
1770 	return(pmap_testbit(pa, PG_M));
1771 }
1772 
1773 vm_offset_t
1774 pmap_phys_address(ppn)
1775 	int ppn;
1776 {
1777 	return(hp300_ptob(ppn));
1778 }
1779 
1780 /*
1781  * Miscellaneous support routines follow
1782  */
1783 
1784 /* static */
1785 hp300_protection_init()
1786 {
1787 	register int *kp, prot;
1788 
1789 	kp = protection_codes;
1790 	for (prot = 0; prot < 8; prot++) {
1791 		switch (prot) {
1792 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1793 			*kp++ = 0;
1794 			break;
1795 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1796 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1797 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1798 			*kp++ = PG_RO;
1799 			break;
1800 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1801 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1802 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1803 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1804 			*kp++ = PG_RW;
1805 			break;
1806 		}
1807 	}
1808 }
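
/*
 * The table built above, indexed by the requested VM_PROT_{READ,WRITE,
 * EXECUTE} combination, collapses to three hardware values:
 *
 *	protection requested		protection_codes[prot]
 *	--------------------		----------------------
 *	none				0
 *	read and/or execute		PG_RO
 *	any combination with write	PG_RW
 */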
1809 
1810 /* static */
1811 boolean_t
1812 pmap_testbit(pa, bit)
1813 	register vm_offset_t pa;
1814 	int bit;
1815 {
1816 	register pv_entry_t pv;
1817 	register int *pte, ix;
1818 	int s;
1819 
1820 	if (pa < vm_first_phys || pa >= vm_last_phys)
1821 		return(FALSE);
1822 
1823 	pv = pa_to_pvh(pa);
1824 	s = splimp();
1825 	/*
1826 	 * Check saved info first
1827 	 */
1828 	if (pmap_attributes[pa_index(pa)] & bit) {
1829 		splx(s);
1830 		return(TRUE);
1831 	}
1832 	/*
1833 	 * Flush VAC to get correct state of any hardware maintained bits.
1834 	 */
1835 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
1836 		DCIS();
1837 	/*
1838 	 * Not found, check current mappings returning
1839 	 * immediately if found.
1840 	 */
1841 	if (pv->pv_pmap != NULL) {
1842 		for (; pv; pv = pv->pv_next) {
1843 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1844 			ix = 0;
1845 			do {
1846 				if (*pte++ & bit) {
1847 					splx(s);
1848 					return(TRUE);
1849 				}
1850 			} while (++ix != hppagesperpage);
1851 		}
1852 	}
1853 	splx(s);
1854 	return(FALSE);
1855 }
1856 
1857 /* static */
1858 pmap_changebit(pa, bit, setem)
1859 	register vm_offset_t pa;
1860 	int bit;
1861 	boolean_t setem;
1862 {
1863 	register pv_entry_t pv;
1864 	register int *pte, npte, ix;
1865 	vm_offset_t va;
1866 	int s;
1867 	boolean_t firstpage = TRUE;
1868 
1869 #ifdef DEBUG
1870 	if (pmapdebug & PDB_BITS)
1871 		printf("pmap_changebit(%x, %x, %s)\n",
1872 		       pa, bit, setem ? "set" : "clear");
1873 #endif
1874 	if (pa < vm_first_phys || pa >= vm_last_phys)
1875 		return;
1876 
1877 	pv = pa_to_pvh(pa);
1878 	s = splimp();
1879 	/*
1880 	 * Clear saved attributes (modify, reference)
1881 	 */
1882 	if (!setem)
1883 		pmap_attributes[pa_index(pa)] &= ~bit;
1884 	/*
1885 	 * Loop over all current mappings setting/clearing as appropriate.
1886 	 * If setting RO do we need to clear the VAC?
1887 	 */
1888 	if (pv->pv_pmap != NULL) {
1889 #ifdef DEBUG
1890 		int toflush = 0;
1891 #endif
1892 		for (; pv; pv = pv->pv_next) {
1893 #ifdef DEBUG
1894 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1895 #endif
1896 			va = pv->pv_va;
1897 			pte = (int *) pmap_pte(pv->pv_pmap, va);
1898 			/*
1899 			 * Flush VAC to ensure we get correct state of HW bits
1900 			 * so we don't clobber them.
1901 			 */
1902 			if (firstpage && pmap_aliasmask) {
1903 				firstpage = FALSE;
1904 				DCIS();
1905 			}
1906 			ix = 0;
1907 			do {
1908 				if (setem)
1909 					npte = *pte | bit;
1910 				else
1911 					npte = *pte & ~bit;
1912 				if (*pte != npte) {
1913 					*pte = npte;
1914 					TBIS(va);
1915 				}
1916 				va += HP_PAGE_SIZE;
1917 				pte++;
1918 			} while (++ix != hppagesperpage);
1919 		}
1920 #ifdef DEBUG
1921 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1922 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1923 				DCIA();
1924 			else if (toflush == 2)
1925 				DCIS();
1926 			else
1927 				DCIU();
1928 		}
1929 #endif
1930 	}
1931 	splx(s);
1932 }
1933 
1934 /* static */
1935 void
1936 pmap_enter_ptpage(pmap, va)
1937 	register pmap_t pmap;
1938 	register vm_offset_t va;
1939 {
1940 	register vm_offset_t ptpa;
1941 	register pv_entry_t pv;
1942 	st_entry_t *ste;
1943 	int s;
1944 
1945 #ifdef DEBUG
1946 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
1947 		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
1948 	enter_stats.ptpneeded++;
1949 #endif
1950 	/*
1951 	 * Allocate a segment table if necessary.  Note that it is allocated
1952 	 * from kernel_map and not pt_map.  This keeps user page tables
1953 	 * aligned on segment boundaries in the kernel address space.
1954 	 * The segment table is wired down.  It will be freed whenever the
1955 	 * reference count drops to zero.
1956 	 */
1957 	if (pmap->pm_stab == Segtabzero) {
1958 		pmap->pm_stab = (st_entry_t *)
1959 			kmem_alloc(kernel_map, HP_STSIZE);
1960 		pmap->pm_stchanged = TRUE;
1961 		/*
1962 		 * XXX may have changed segment table pointer for current
1963 		 * process so update now to reload hardware.
1964 		 */
1965 		if (pmap == curproc->p_vmspace->vm_map.pmap)
1966 			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
1967 #ifdef DEBUG
1968 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
1969 			printf("enter: pmap %x stab %x\n",
1970 			       pmap, pmap->pm_stab);
1971 #endif
1972 	}
1973 
1974 	ste = pmap_ste(pmap, va);
1975 	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
1976 
1977 	/*
1978 	 * In the kernel we allocate a page from the kernel PT page
1979 	 * free list and map it into the kernel page table map (via
1980 	 * pmap_enter).
1981 	 */
1982 	if (pmap == kernel_pmap) {
1983 		register struct kpt_page *kpt;
1984 
1985 		s = splimp();
1986 		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
1987 			/*
1988 			 * No PT pages available.
1989 			 * Try once to free up unused ones.
1990 			 */
1991 #ifdef DEBUG
1992 			if (pmapdebug & PDB_COLLECT)
1993 				printf("enter: no KPT pages, collecting...\n");
1994 #endif
1995 			pmap_collect(kernel_pmap);
1996 			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
1997 				panic("pmap_enter_ptpage: can't get KPT page");
1998 		}
1999 #ifdef DEBUG
2000 		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
2001 			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
2002 #endif
2003 		kpt_free_list = kpt->kpt_next;
2004 		kpt->kpt_next = kpt_used_list;
2005 		kpt_used_list = kpt;
2006 		ptpa = kpt->kpt_pa;
2007 		bzero(kpt->kpt_va, HP_PAGE_SIZE);
2008 		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
2009 #ifdef DEBUG
2010 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2011 			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
2012 			       ste - pmap_ste(pmap, 0),
2013 			       *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
2014 			       kpt->kpt_va);
2015 #endif
2016 		splx(s);
2017 	}
2018 	/*
2019 	 * For user processes we just simulate a fault on that location
2020 	 * letting the VM system allocate a zero-filled page.
2021 	 */
2022 	else {
2023 #ifdef DEBUG
2024 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2025 			printf("enter: about to fault UPT pg at %x\n", va);
2026 #endif
2027 		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
2028 		    != KERN_SUCCESS)
2029 			panic("pmap_enter: vm_fault failed");
2030 		ptpa = pmap_extract(kernel_pmap, va);
2031 #ifdef DEBUG
2032 		PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
2033 #endif
2034 	}
2035 
2036 	/*
2037 	 * Locate the PV entry in the kernel for this PT page and
2038 	 * record the STE address.  This is so that we can invalidate
2039 	 * the STE when we remove the mapping for the page.
2040 	 */
2041 	pv = pa_to_pvh(ptpa);
2042 	s = splimp();
2043 	if (pv) {
2044 		pv->pv_flags |= PV_PTPAGE;
2045 		do {
2046 			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
2047 				break;
2048 		} while (pv = pv->pv_next);
2049 	}
2050 #ifdef DEBUG
2051 	if (pv == NULL)
2052 		panic("pmap_enter_ptpage: PT page not entered");
2053 #endif
2054 	pv->pv_ptste = ste;
2055 	pv->pv_ptpmap = pmap;
2056 #ifdef DEBUG
2057 	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2058 		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
2059 #endif
2060 
2061 	/*
2062 	 * Map the new PT page into the segment table.
2063 	 * Also increment the reference count on the segment table if this
2064 	 * was a user page table page.  Note that we don't use vm_map_pageable
2065  * to keep the count like we do for PT pages; this is mostly because
2066 	 * it would be difficult to identify ST pages in pmap_pageable to
2067 	 * release them.  We also avoid the overhead of vm_map_pageable.
2068 	 */
2069 	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2070 	if (pmap != kernel_pmap) {
2071 		pmap->pm_sref++;
2072 #ifdef DEBUG
2073 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2074 			printf("enter: stab %x refcnt %d\n",
2075 			       pmap->pm_stab, pmap->pm_sref);
2076 #endif
2077 	}
2078 	/*
2079 	 * Flush stale TLB info.
2080 	 */
2081 	if (pmap == kernel_pmap)
2082 		TBIAS();
2083 	else
2084 		TBIAU();
2085 	pmap->pm_ptpages++;
2086 	splx(s);
2087 }
2088 
2089 #ifdef DEBUG
2090 pmap_pvdump(pa)
2091 	vm_offset_t pa;
2092 {
2093 	register pv_entry_t pv;
2094 
2095 	printf("pa %x", pa);
2096 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
2097 		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
2098 		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
2099 		       pv->pv_flags);
2100 	printf("\n");
2101 }
2102 
2103 pmap_check_wiring(str, va)
2104 	char *str;
2105 	vm_offset_t va;
2106 {
2107 	vm_map_entry_t entry;
2108 	register int count, *pte;
2109 
2110 	va = trunc_page(va);
2111 	if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
2112 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
2113 		return;
2114 
2115 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
2116 		printf("wired_check: entry for %x not found\n", va);
2117 		return;
2118 	}
2119 	count = 0;
2120 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
2121 		if (*pte)
2122 			count++;
2123 	if (entry->wired_count != count)
2124 		printf("*%s*: %x: w%d/a%d\n",
2125 		       str, va, entry->wired_count, count);
2126 }
2127 #endif
2128