xref: /original-bsd/sys/hp300/hp300/pmap.c (revision 95ecee29)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	8.2 (Berkeley) 11/14/93
12  */
13 
14 /*
15  * HP9000/300 series physical map management code.
16  *
17  * Supports:
18  *	68020 with HP MMU	models 320, 350
19  *	68020 with 68851 MMU	models 318, 319, 330 (all untested)
20  *	68030 with on-chip MMU	models 340, 360, 370, 345, 375, 400
21  *	68040 with on-chip MMU	models 380, 425, 433
22  *
23  * Notes:
24  *	Don't even pay lip service to multiprocessor support.
25  *
26  *	We assume TLB entries don't have process tags (except for the
27  *	supervisor/user distinction) so we only invalidate TLB entries
28  *	when changing mappings for the current (or kernel) pmap.  This is
29  *	technically not true for the 68851 but we flush the TLB on every
30  *	context switch, so it effectively winds up that way.
31  *
32  *	Bitwise and/or operations are significantly faster than bitfield
33  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
34  *	macros.  Note also that the two are not always equivalent; e.g.:
35  *		(*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
36  *	and a couple of routines that deal with protection and wiring take
37  *	some shortcuts that assume the and/or definitions.
38  *
39  *	This implementation will only work for PAGE_SIZE == NBPG
40  *	(i.e. 4096 bytes).
41  */
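
/*
 * Editorial illustration (not in the original) of the and/or-vs-bitfield
 * point above: with the write-protect bit at 0x04 (as the [4] suggests),
 * a write-protected page gives
 *
 *	*(int *)pte & PG_PROT	-> 4	(the bit, in place)
 *	pte->pg_prot		-> 1	(the bitfield value)
 *
 * so tests must stick to one form or the other; the pmap_pte_* macros
 * below use the masked-int form throughout, and the shortcuts mentioned
 * above depend on that.
 */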
42 
43 /*
44  *	Manages physical address maps.
45  *
46  *	In addition to hardware address maps, this
47  *	module is called upon to provide software-use-only
48  *	maps which may or may not be stored in the same
49  *	form as hardware maps.  These pseudo-maps are
50  *	used to store intermediate results from copy
51  *	operations to and from address spaces.
52  *
53  *	Since the information managed by this module is
54  *	also stored by the logical address mapping module,
55  *	this module may throw away valid virtual-to-physical
56  *	mappings at almost any time.  However, invalidations
57  *	of virtual-to-physical mappings must be done as
58  *	requested.
59  *
60  *	In order to cope with hardware architectures which
61  *	make virtual-to-physical map invalidates expensive,
62  *	this module may delay invalidation or protection-reduction
63  *	operations until such time as they are actually
64  *	necessary.  This module is given full information as
65  *	to which processors are currently using which maps,
66  *	and as to when physical maps must be made correct.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/proc.h>
72 #include <sys/malloc.h>
73 #include <sys/user.h>
74 
75 #include <hp300/hp300/pte.h>
76 
77 #include <vm/vm.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_page.h>
80 
81 #include <machine/cpu.h>
82 
83 #ifdef PMAPSTATS
84 struct {
85 	int collectscans;
86 	int collectpages;
87 	int kpttotal;
88 	int kptinuse;
89 	int kptmaxuse;
90 } kpt_stats;
91 struct {
92 	int kernel;	/* entering kernel mapping */
93 	int user;	/* entering user mapping */
94 	int ptpneeded;	/* needed to allocate a PT page */
95 	int nochange;	/* no change at all */
96 	int pwchange;	/* no mapping change, just wiring or protection */
97 	int wchange;	/* no mapping change, just wiring */
98 	int pchange;	/* no mapping change, just protection */
99 	int mchange;	/* was mapped but mapping to different page */
100 	int managed;	/* a managed page */
101 	int firstpv;	/* first mapping for this PA */
102 	int secondpv;	/* second mapping for this PA */
103 	int ci;		/* cache inhibited */
104 	int unmanaged;	/* not a managed page */
105 	int flushes;	/* cache flushes */
106 } enter_stats;
107 struct {
108 	int calls;
109 	int removes;
110 	int pvfirst;
111 	int pvsearch;
112 	int ptinvalid;
113 	int uflushes;
114 	int sflushes;
115 } remove_stats;
116 struct {
117 	int calls;
118 	int changed;
119 	int alreadyro;
120 	int alreadyrw;
121 } protect_stats;
122 struct chgstats {
123 	int setcalls;
124 	int sethits;
125 	int setmiss;
126 	int clrcalls;
127 	int clrhits;
128 	int clrmiss;
129 } changebit_stats[16];
130 #endif
131 
132 #ifdef DEBUG
133 int debugmap = 0;
134 int pmapdebug = 0x2000;
135 #define PDB_FOLLOW	0x0001
136 #define PDB_INIT	0x0002
137 #define PDB_ENTER	0x0004
138 #define PDB_REMOVE	0x0008
139 #define PDB_CREATE	0x0010
140 #define PDB_PTPAGE	0x0020
141 #define PDB_CACHE	0x0040
142 #define PDB_BITS	0x0080
143 #define PDB_COLLECT	0x0100
144 #define PDB_PROTECT	0x0200
145 #define PDB_SEGTAB	0x0400
146 #define PDB_MULTIMAP	0x0800
147 #define PDB_PARANOIA	0x2000
148 #define PDB_WIRING	0x4000
149 #define PDB_PVDUMP	0x8000
150 
151 #ifdef HAVEVAC
152 int pmapvacflush = 0;
153 #define	PVF_ENTER	0x01
154 #define	PVF_REMOVE	0x02
155 #define	PVF_PROTECT	0x04
156 #define	PVF_TOTAL	0x80
157 #endif
158 
159 #if defined(HP380)
160 int dowriteback = 1;	/* 68040: enable writeback caching */
161 int dokwriteback = 1;	/* 68040: enable writeback caching of kernel AS */
162 #endif
163 
164 extern vm_offset_t pager_sva, pager_eva;
165 #endif
166 
167 /*
168  * Get STEs and PTEs for user/kernel address space
169  */
170 #if defined(HP380)
171 #define	pmap_ste1(m, v)	\
172 	(&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
173 /* XXX assumes physically contiguous ST pages (if more than one) */
174 #define pmap_ste2(m, v) \
175 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
176 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
177 #define	pmap_ste(m, v)	\
178 	(&((m)->pm_stab[(vm_offset_t)(v) \
179 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
180 #define pmap_ste_v(m, v) \
181 	(mmutype == MMU_68040 \
182 	 ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
183 	    (*(int *)pmap_ste2(m, v) & SG_V)) \
184 	 : (*(int *)pmap_ste(m, v) & SG_V))
185 #else
186 #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
187 #define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
188 #endif
189 
190 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
191 #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
192 #define pmap_pte_w(pte)		(*(int *)(pte) & PG_W)
193 #define pmap_pte_ci(pte)	(*(int *)(pte) & PG_CI)
194 #define pmap_pte_m(pte)		(*(int *)(pte) & PG_M)
195 #define pmap_pte_u(pte)		(*(int *)(pte) & PG_U)
196 #define pmap_pte_prot(pte)	(*(int *)(pte) & PG_PROT)
197 #define pmap_pte_v(pte)		(*(int *)(pte) & PG_V)
198 
199 #define pmap_pte_set_w(pte, v) \
200 	if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
201 #define pmap_pte_set_prot(pte, v) \
202 	if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
203 #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
204 #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
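
/*
 * Editorial sketch of how the macros above are meant to be used; the
 * 7/7/6-bit 68040 table layout mentioned here is an assumption based on
 * the SG4_* names, not something stated in this file.  pmap_ste1()
 * indexes the level-1 table with va >> SG4_SHIFT1; pmap_ste2() follows
 * the level-1 descriptor (translating its physical address back to a
 * kernel virtual address via pm_stab/pm_stpa) and indexes the level-2
 * table with (va & SG4_MASK2) >> SG4_SHIFT2; on the other MMUs a single
 * lookup at va >> SG_ISHIFT selects the PT page.  A typical VA-to-PA
 * walk then looks like:
 *
 *	if (pmap_ste_v(pmap, va)) {
 *		pte = pmap_pte(pmap, va);
 *		if (pmap_pte_v(pte))
 *			pa = pmap_pte_pa(pte) | (va & ~PG_FRAME);
 *	}
 *
 * which is essentially what pmap_extract() does below.
 */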
205 
206 /*
207  * Given a map and a machine independent protection code,
208  * convert to an hp300 protection code.
209  */
210 #define pte_prot(m, p)	(protection_codes[p])
211 int	protection_codes[8];
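
/*
 * Editorial note: protection_codes[] is indexed by the 3-bit
 * VM_PROT_{READ,WRITE,EXECUTE} combination and is filled in elsewhere
 * (its initialization is not part of this excerpt).  Since the hp300
 * PTE only distinguishes read-only from read/write, the table
 * presumably reduces to something like:
 *
 *	protection_codes[VM_PROT_READ]                 = PG_RO;
 *	protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO;
 *	protection_codes[p]                            = PG_RW;
 *		for any combination p containing VM_PROT_WRITE
 *
 * which is why pmap_protect() below can treat the pte_prot() result as
 * a boolean "isro" value.
 */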
212 
213 /*
214  * Kernel page table page management.
215  */
216 struct kpt_page {
217 	struct kpt_page *kpt_next;	/* link on either used or free list */
218 	vm_offset_t	kpt_va;		/* always valid kernel VA */
219 	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
220 };
221 struct kpt_page *kpt_free_list, *kpt_used_list;
222 struct kpt_page *kpt_pages;
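
/*
 * Editorial sketch (assumptions noted): the kpt_page array is carved up
 * and threaded onto kpt_free_list in pmap_init() below, and
 * pmap_collect() returns empty kernel PT pages to that free list.  The
 * consumer, pmap_enter_ptpage(), is not part of this excerpt, but
 * allocation is presumably the usual singly-linked-list pop/push:
 *
 *	kpt = kpt_free_list;
 *	kpt_free_list = kpt->kpt_next;
 *	kpt->kpt_next = kpt_used_list;
 *	kpt_used_list = kpt;
 *	bzero((caddr_t)kpt->kpt_va, HP_PAGE_SIZE);
 *
 * kpt_va/kpt_pa let the code reach the page either way without an
 * extra pmap_extract().
 */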
223 
224 /*
225  * Kernel segment/page table and page table map.
226  * The page table map gives us a level of indirection we need to dynamically
227  * expand the page table.  It is essentially a copy of the segment table
228  * with PTEs instead of STEs.  All are initialized in locore at boot time.
229  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
230  * Segtabzero is an empty segment table which all processes share until they
231  * reference something.
232  */
233 st_entry_t	*Sysseg;
234 pt_entry_t	*Sysmap, *Sysptmap;
235 st_entry_t	*Segtabzero, *Segtabzeropa;
236 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
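
/*
 * Editorial example: because Sysptmap parallels the segment table, the
 * Sysptmap entry that maps a given kernel PT page is found with the
 * same index as the corresponding STE, which is exactly the indexing
 * pmap_collect() uses below:
 *
 *	pte = &Sysptmap[(st_entry_t *)ste - pmap_ste(kernel_pmap, 0)];
 */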
237 
238 struct pmap	kernel_pmap_store;
239 vm_map_t	pt_map;
240 
241 vm_offset_t    	avail_start;	/* PA of first available physical page */
242 vm_offset_t	avail_end;	/* PA of last available physical page */
243 vm_size_t	mem_size;	/* memory size in bytes */
244 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
245 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
246 vm_offset_t	vm_first_phys;	/* PA of first managed page */
247 vm_offset_t	vm_last_phys;	/* PA just past last managed page */
248 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
249 char		*pmap_attributes;	/* reference and modify bits */
250 #ifdef HAVEVAC
251 int		pmap_aliasmask;	/* separation at which VA aliasing ok */
252 #endif
253 #if defined(HP380)
254 int		protostfree;	/* prototype (default) free ST map */
255 #endif
256 
257 /*
258  * Internal routines
259  */
260 void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
261 boolean_t pmap_testbit	__P((vm_offset_t, int));
262 void pmap_changebit	__P((vm_offset_t, int, boolean_t));
263 void pmap_enter_ptpage	__P((pmap_t, vm_offset_t));
264 #ifdef DEBUG
265 void pmap_pvdump	__P((vm_offset_t));
266 void pmap_check_wiring	__P((char *, vm_offset_t));
267 #endif
268 
269 /* pmap_remove_mapping flags */
270 #define	PRM_TFLUSH	1
271 #define	PRM_CFLUSH	2
272 
273 /*
274  * Bootstrap memory allocator. This function allows for early dynamic
275  * memory allocation until the virtual memory system has been bootstrapped.
276  * After that point, either kmem_alloc or malloc should be used. This
277  * function works by stealing pages from the (to be) managed page pool,
278  * stealing virtual address space, then mapping the pages and zeroing them.
279  *
280  * It should be used from pmap_bootstrap till vm_page_startup, afterwards
281  * it cannot be used, and will generate a panic if tried. Note that this
282  * memory will never be freed, and in essence it is wired down.
283  */
284 void *
285 pmap_bootstrap_alloc(size) {
286 	vm_offset_t val;
287 	int i;
288 	extern boolean_t vm_page_startup_initialized;
289 
290 	if (vm_page_startup_initialized)
291 		panic("pmap_bootstrap_alloc: called after startup initialized");
292 	size = round_page(size);
293 	val = virtual_avail;
294 
295 	virtual_avail = pmap_map(virtual_avail, avail_start,
296 		avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
297 	avail_start += size;
298 
299 	blkclr ((caddr_t) val, size);
300 	return ((void *) val);
301 }
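
/*
 * Editorial usage sketch: between pmap_bootstrap and vm_page_startup a
 * caller can grab wired, zeroed, page-rounded memory with something
 * like (the variable name here is hypothetical):
 *
 *	early_buf = (caddr_t) pmap_bootstrap_alloc(NBPG);
 *
 * The pages are taken from avail_start, mapped at virtual_avail via
 * pmap_map() with read/write protection, and are never freed.
 */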
302 
303 /*
304  *	Initialize the pmap module.
305  *	Called by vm_init, to initialize any structures that the pmap
306  *	system needs to map virtual memory.
307  */
308 void
309 pmap_init(phys_start, phys_end)
310 	vm_offset_t	phys_start, phys_end;
311 {
312 	vm_offset_t	addr, addr2;
313 	vm_size_t	npg, s;
314 	int		rv;
315 	extern char kstack[];
316 
317 #ifdef DEBUG
318 	if (pmapdebug & PDB_FOLLOW)
319 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
320 #endif
321 	/*
322 	 * Now that kernel map has been allocated, we can mark as
323 	 * unavailable regions which we have mapped in locore.
324 	 */
325 	addr = (vm_offset_t) intiobase;
326 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
327 			   &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
328 	if (addr != (vm_offset_t)intiobase)
329 		goto bogons;
330 	addr = (vm_offset_t) Sysmap;
331 	vm_object_reference(kernel_object);
332 	(void) vm_map_find(kernel_map, kernel_object, addr,
333 			   &addr, HP_MAX_PTSIZE, FALSE);
334 	/*
335 	 * If this fails it is probably because the static portion of
336 	 * the kernel page table isn't big enough and we overran the
337 	 * page table map.   Need to adjust pmap_size() in hp300_init.c.
338 	 */
339 	if (addr != (vm_offset_t)Sysmap)
340 		goto bogons;
341 
342 	addr = (vm_offset_t) kstack;
343 	vm_object_reference(kernel_object);
344 	(void) vm_map_find(kernel_map, kernel_object, addr,
345 			   &addr, hp300_ptob(UPAGES), FALSE);
346 	if (addr != (vm_offset_t)kstack)
347 bogons:
348 		panic("pmap_init: bogons in the VM system!\n");
349 
350 #ifdef DEBUG
351 	if (pmapdebug & PDB_INIT) {
352 		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
353 		       Sysseg, Sysmap, Sysptmap);
354 		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
355 		       avail_start, avail_end, virtual_avail, virtual_end);
356 	}
357 #endif
358 
359 	/*
360 	 * Allocate memory for random pmap data structures.  Includes the
361 	 * initial segment table, pv_head_table and pmap_attributes.
362 	 */
363 	npg = atop(phys_end - phys_start);
364 	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
365 	s = round_page(s);
366 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
367 	Segtabzero = (st_entry_t *) addr;
368 	Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
369 	addr += HP_STSIZE;
370 	pv_table = (pv_entry_t) addr;
371 	addr += sizeof(struct pv_entry) * npg;
372 	pmap_attributes = (char *) addr;
373 #ifdef DEBUG
374 	if (pmapdebug & PDB_INIT)
375 		printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
376 		       s, npg, Segtabzero, Segtabzeropa,
377 		       pv_table, pmap_attributes);
378 #endif
379 
380 	/*
381 	 * Allocate physical memory for kernel PT pages and their management.
382 	 * We need 1 PT page per possible task plus some slop.
383 	 */
384 	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
385 	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
386 
387 	/*
388 	 * Verify that space will be allocated in region for which
389 	 * we already have kernel PT pages.
390 	 */
391 	addr = 0;
392 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
393 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
394 		panic("pmap_init: kernel PT too small");
395 	vm_map_remove(kernel_map, addr, addr + s);
396 
397 	/*
398 	 * Now allocate the space and link the pages together to
399 	 * form the KPT free list.
400 	 */
401 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
402 	s = ptoa(npg);
403 	addr2 = addr + s;
404 	kpt_pages = &((struct kpt_page *)addr2)[npg];
405 	kpt_free_list = (struct kpt_page *) 0;
406 	do {
407 		addr2 -= HP_PAGE_SIZE;
408 		(--kpt_pages)->kpt_next = kpt_free_list;
409 		kpt_free_list = kpt_pages;
410 		kpt_pages->kpt_va = addr2;
411 		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
412 	} while (addr != addr2);
413 #ifdef PMAPSTATS
414 	kpt_stats.kpttotal = atop(s);
415 #endif
416 #ifdef DEBUG
417 	if (pmapdebug & PDB_INIT)
418 		printf("pmap_init: KPT: %d pages from %x to %x\n",
419 		       atop(s), addr, addr + s);
420 #endif
421 
422 	/*
423 	 * Slightly modified version of kmem_suballoc() to get page table
424 	 * map where we want it.
425 	 */
426 	addr = HP_PTBASE;
427 	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
428 	addr2 = addr + s;
429 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
430 	if (rv != KERN_SUCCESS)
431 		panic("pmap_init: cannot allocate space for PT map");
432 	pmap_reference(vm_map_pmap(kernel_map));
433 	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
434 	if (pt_map == NULL)
435 		panic("pmap_init: cannot create pt_map");
436 	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
437 	if (rv != KERN_SUCCESS)
438 		panic("pmap_init: cannot map range to pt_map");
439 #ifdef DEBUG
440 	if (pmapdebug & PDB_INIT)
441 		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
442 #endif
443 
444 #if defined(HP380)
445 	if (mmutype == MMU_68040) {
446 		protostfree = ~l2tobm(0);
447 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
448 			protostfree &= ~l2tobm(rv);
449 	}
450 #endif
451 
452 	/*
453 	 * Now it is safe to enable pv_table recording.
454 	 */
455 	vm_first_phys = phys_start;
456 	vm_last_phys = phys_end;
457 	pmap_initialized = TRUE;
458 }
459 
460 /*
461  *	Used to map a range of physical addresses into kernel
462  *	virtual address space.
463  *
464  *	For now, VM is already on, we only need to map the
465  *	specified memory.
466  */
467 vm_offset_t
468 pmap_map(virt, start, end, prot)
469 	vm_offset_t	virt;
470 	vm_offset_t	start;
471 	vm_offset_t	end;
472 	int		prot;
473 {
474 #ifdef DEBUG
475 	if (pmapdebug & PDB_FOLLOW)
476 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
477 #endif
478 	while (start < end) {
479 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
480 		virt += PAGE_SIZE;
481 		start += PAGE_SIZE;
482 	}
483 	return(virt);
484 }
485 
486 /*
487  *	Create and return a physical map.
488  *
489  *	If the size specified for the map
490  *	is zero, the map is an actual physical
491  *	map, and may be referenced by the
492  *	hardware.
493  *
494  *	If the size specified is non-zero,
495  *	the map will be used in software only, and
496  *	is bounded by that size.
497  */
498 pmap_t
499 pmap_create(size)
500 	vm_size_t	size;
501 {
502 	register pmap_t pmap;
503 
504 #ifdef DEBUG
505 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
506 		printf("pmap_create(%x)\n", size);
507 #endif
508 	/*
509 	 * Software use map does not need a pmap
510 	 */
511 	if (size)
512 		return(NULL);
513 
514 	/* XXX: is it ok to wait here? */
515 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
516 #ifdef notifwewait
517 	if (pmap == NULL)
518 		panic("pmap_create: cannot allocate a pmap");
519 #endif
520 	bzero(pmap, sizeof(*pmap));
521 	pmap_pinit(pmap);
522 	return (pmap);
523 }
524 
525 /*
526  * Initialize a preallocated and zeroed pmap structure,
527  * such as one in a vmspace structure.
528  */
529 void
530 pmap_pinit(pmap)
531 	register struct pmap *pmap;
532 {
533 
534 #ifdef DEBUG
535 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
536 		printf("pmap_pinit(%x)\n", pmap);
537 #endif
538 	/*
539 	 * No need to allocate page table space yet but we do need a
540 	 * valid segment table.  Initially, we point everyone at the
541 	 * "null" segment table.  On the first pmap_enter, a real
542 	 * segment table will be allocated.
543 	 */
544 	pmap->pm_stab = Segtabzero;
545 	pmap->pm_stpa = Segtabzeropa;
546 #if defined(HP380)
547 	if (mmutype == MMU_68040)
548 		pmap->pm_stfree = protostfree;
549 #endif
550 	pmap->pm_stchanged = TRUE;
551 	pmap->pm_count = 1;
552 	simple_lock_init(&pmap->pm_lock);
553 }
554 
555 /*
556  *	Retire the given physical map from service.
557  *	Should only be called if the map contains
558  *	no valid mappings.
559  */
560 void
561 pmap_destroy(pmap)
562 	register pmap_t pmap;
563 {
564 	int count;
565 
566 #ifdef DEBUG
567 	if (pmapdebug & PDB_FOLLOW)
568 		printf("pmap_destroy(%x)\n", pmap);
569 #endif
570 	if (pmap == NULL)
571 		return;
572 
573 	simple_lock(&pmap->pm_lock);
574 	count = --pmap->pm_count;
575 	simple_unlock(&pmap->pm_lock);
576 	if (count == 0) {
577 		pmap_release(pmap);
578 		free((caddr_t)pmap, M_VMPMAP);
579 	}
580 }
581 
582 /*
583  * Release any resources held by the given physical map.
584  * Called when a pmap initialized by pmap_pinit is being released.
585  * Should only be called if the map contains no valid mappings.
586  */
587 void
588 pmap_release(pmap)
589 	register struct pmap *pmap;
590 {
591 
592 #ifdef DEBUG
593 	if (pmapdebug & PDB_FOLLOW)
594 		printf("pmap_release(%x)\n", pmap);
595 #endif
596 #ifdef notdef /* DIAGNOSTIC */
597 	/* count would be 0 from pmap_destroy... */
598 	simple_lock(&pmap->pm_lock);
599 	if (pmap->pm_count != 1)
600 		panic("pmap_release count");
601 #endif
602 	if (pmap->pm_ptab)
603 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
604 				 HP_MAX_PTSIZE);
605 	if (pmap->pm_stab != Segtabzero)
606 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
607 }
608 
609 /*
610  *	Add a reference to the specified pmap.
611  */
612 void
613 pmap_reference(pmap)
614 	pmap_t	pmap;
615 {
616 #ifdef DEBUG
617 	if (pmapdebug & PDB_FOLLOW)
618 		printf("pmap_reference(%x)\n", pmap);
619 #endif
620 	if (pmap != NULL) {
621 		simple_lock(&pmap->pm_lock);
622 		pmap->pm_count++;
623 		simple_unlock(&pmap->pm_lock);
624 	}
625 }
626 
627 /*
628  *	Remove the given range of addresses from the specified map.
629  *
630  *	It is assumed that the start and end are properly
631  *	rounded to the page size.
632  */
633 void
634 pmap_remove(pmap, sva, eva)
635 	register pmap_t pmap;
636 	register vm_offset_t sva, eva;
637 {
638 	register vm_offset_t nssva;
639 	register pt_entry_t *pte;
640 	boolean_t firstpage, needcflush;
641 	int flags;
642 
643 #ifdef DEBUG
644 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
645 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
646 #endif
647 
648 	if (pmap == NULL)
649 		return;
650 
651 #ifdef PMAPSTATS
652 	remove_stats.calls++;
653 #endif
654 	firstpage = TRUE;
655 	needcflush = FALSE;
656 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
657 	while (sva < eva) {
658 		nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
659 		if (nssva == 0 || nssva > eva)
660 			nssva = eva;
661 		/*
662 		 * If VA belongs to an unallocated segment,
663 		 * skip to the next segment boundary.
664 		 */
665 		if (!pmap_ste_v(pmap, sva)) {
666 			sva = nssva;
667 			continue;
668 		}
669 		/*
670 		 * Invalidate every valid mapping within this segment.
671 		 */
672 		pte = pmap_pte(pmap, sva);
673 		while (sva < nssva) {
674 			if (pmap_pte_v(pte)) {
675 #ifdef HAVEVAC
676 				if (pmap_aliasmask) {
677 					/*
678 					 * Purge kernel side of VAC to ensure
679 					 * we get the correct state of any
680 					 * hardware maintained bits.
681 					 */
682 					if (firstpage) {
683 						DCIS();
684 #ifdef PMAPSTATS
685 						remove_stats.sflushes++;
686 #endif
687 					}
688 					/*
689 					 * Remember if we may need to
690 					 * flush the VAC due to a non-CI
691 					 * mapping.
692 					 */
693 					if (!needcflush && !pmap_pte_ci(pte))
694 						needcflush = TRUE;
695 
696 				}
697 #endif
698 				pmap_remove_mapping(pmap, sva, pte, flags);
699 				firstpage = FALSE;
700 			}
701 			pte++;
702 			sva += PAGE_SIZE;
703 		}
704 	}
705 	/*
706 	 * Didn't do anything, no need for cache flushes
707 	 */
708 	if (firstpage)
709 		return;
710 #ifdef HAVEVAC
711 	/*
712 	 * In a couple of cases, we don't need to worry about flushing
713 	 * the VAC:
714 	 * 	1. if this is a kernel mapping,
715 	 *	   we have already done it
716 	 *	2. if it is a user mapping not for the current process,
717 	 *	   it won't be there
718 	 */
719 	if (pmap_aliasmask &&
720 	    (pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
721 		needcflush = FALSE;
722 #ifdef DEBUG
723 	if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
724 		if (pmapvacflush & PVF_TOTAL)
725 			DCIA();
726 		else if (pmap == kernel_pmap)
727 			DCIS();
728 		else
729 			DCIU();
730 	} else
731 #endif
732 	if (needcflush) {
733 		if (pmap == kernel_pmap) {
734 			DCIS();
735 #ifdef PMAPSTATS
736 			remove_stats.sflushes++;
737 #endif
738 		} else {
739 			DCIU();
740 #ifdef PMAPSTATS
741 			remove_stats.uflushes++;
742 #endif
743 		}
744 	}
745 #endif
746 }
747 
748 /*
749  *	pmap_page_protect:
750  *
751  *	Lower the permission for all mappings to a given page.
752  */
753 void
754 pmap_page_protect(pa, prot)
755 	vm_offset_t	pa;
756 	vm_prot_t	prot;
757 {
758 	register pv_entry_t pv;
759 	int s;
760 
761 #ifdef DEBUG
762 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
763 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
764 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
765 #endif
766 	if (pa < vm_first_phys || pa >= vm_last_phys)
767 		return;
768 
769 	switch (prot) {
770 	case VM_PROT_READ|VM_PROT_WRITE:
771 	case VM_PROT_ALL:
772 		break;
773 	/* copy_on_write */
774 	case VM_PROT_READ:
775 	case VM_PROT_READ|VM_PROT_EXECUTE:
776 		pmap_changebit(pa, PG_RO, TRUE);
777 		break;
778 	/* remove_all */
779 	default:
780 		pv = pa_to_pvh(pa);
781 		s = splimp();
782 		while (pv->pv_pmap != NULL) {
783 #ifdef DEBUG
784 			if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
785 			    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
786 				panic("pmap_page_protect: bad mapping");
787 #endif
788 			pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
789 					    PT_ENTRY_NULL,
790 					    PRM_TFLUSH|PRM_CFLUSH);
791 		}
792 		splx(s);
793 		break;
794 	}
795 }
796 
797 /*
798  *	Set the physical protection on the
799  *	specified range of this map as requested.
800  */
801 void
802 pmap_protect(pmap, sva, eva, prot)
803 	register pmap_t	pmap;
804 	register vm_offset_t sva, eva;
805 	vm_prot_t prot;
806 {
807 	register vm_offset_t nssva;
808 	register pt_entry_t *pte;
809 	boolean_t firstpage, needtflush;
810 	int isro;
811 
812 #ifdef DEBUG
813 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
814 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
815 #endif
816 
817 	if (pmap == NULL)
818 		return;
819 
820 #ifdef PMAPSTATS
821 	protect_stats.calls++;
822 #endif
823 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
824 		pmap_remove(pmap, sva, eva);
825 		return;
826 	}
827 	if (prot & VM_PROT_WRITE)
828 		return;
829 
830 	isro = pte_prot(pmap, prot);
831 	needtflush = active_pmap(pmap);
832 	firstpage = TRUE;
833 	while (sva < eva) {
834 		nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
835 		if (nssva == 0 || nssva > eva)
836 			nssva = eva;
837 		/*
838 		 * If VA belongs to an unallocated segment,
839 		 * skip to the next segment boundary.
840 		 */
841 		if (!pmap_ste_v(pmap, sva)) {
842 			sva = nssva;
843 			continue;
844 		}
845 		/*
846 		 * Change protection on mapping if it is valid and doesn't
847 		 * already have the correct protection.
848 		 */
849 		pte = pmap_pte(pmap, sva);
850 		while (sva < nssva) {
851 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
852 #ifdef HAVEVAC
853 				/*
854 				 * Purge kernel side of VAC to ensure we
855 				 * get the correct state of any hardware
856 				 * maintained bits.
857 				 *
858 				 * XXX do we need to clear the VAC in
859 				 * general to reflect the new protection?
860 				 */
861 				if (firstpage && pmap_aliasmask)
862 					DCIS();
863 #endif
864 #if defined(HP380)
865 				/*
866 				 * Clear caches if making RO (see section
867 				 * "7.3 Cache Coherency" in the manual).
868 				 */
869 				if (isro && mmutype == MMU_68040) {
870 					vm_offset_t pa = pmap_pte_pa(pte);
871 
872 					DCFP(pa);
873 					ICPP(pa);
874 				}
875 #endif
876 				pmap_pte_set_prot(pte, isro);
877 				if (needtflush)
878 					TBIS(sva);
879 #ifdef PMAPSTATS
880 				protect_stats.changed++;
881 #endif
882 				firstpage = FALSE;
883 			}
884 #ifdef PMAPSTATS
885 			else if (pmap_pte_v(pte)) {
886 				if (isro)
887 					protect_stats.alreadyro++;
888 				else
889 					protect_stats.alreadyrw++;
890 			}
891 #endif
892 			pte++;
893 			sva += PAGE_SIZE;
894 		}
895 	}
896 #if defined(HAVEVAC) && defined(DEBUG)
897 	if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
898 		if (pmapvacflush & PVF_TOTAL)
899 			DCIA();
900 		else if (pmap == kernel_pmap)
901 			DCIS();
902 		else
903 			DCIU();
904 	}
905 #endif
906 }
907 
908 /*
909  *	Insert the given physical page (p) at
910  *	the specified virtual address (v) in the
911  *	target physical map with the protection requested.
912  *
913  *	If specified, the page will be wired down, meaning
914  *	that the related pte cannot be reclaimed.
915  *
916  *	NB:  This is the only routine which MAY NOT lazy-evaluate
917  *	or lose information.  That is, this routine must actually
918  *	insert this page into the given map NOW.
919  */
920 void
921 pmap_enter(pmap, va, pa, prot, wired)
922 	register pmap_t pmap;
923 	vm_offset_t va;
924 	register vm_offset_t pa;
925 	vm_prot_t prot;
926 	boolean_t wired;
927 {
928 	register pt_entry_t *pte;
929 	register int npte;
930 	vm_offset_t opa;
931 	boolean_t cacheable = TRUE;
932 	boolean_t checkpv = TRUE;
933 
934 #ifdef DEBUG
935 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
936 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
937 		       pmap, va, pa, prot, wired);
938 #endif
939 	if (pmap == NULL)
940 		return;
941 
942 #ifdef PMAPSTATS
943 	if (pmap == kernel_pmap)
944 		enter_stats.kernel++;
945 	else
946 		enter_stats.user++;
947 #endif
948 	/*
949 	 * For user mapping, allocate kernel VM resources if necessary.
950 	 */
951 	if (pmap->pm_ptab == NULL)
952 		pmap->pm_ptab = (pt_entry_t *)
953 			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
954 
955 	/*
956 	 * Segment table entry not valid, we need a new PT page
957 	 */
958 	if (!pmap_ste_v(pmap, va))
959 		pmap_enter_ptpage(pmap, va);
960 
961 	pa = hp300_trunc_page(pa);
962 	pte = pmap_pte(pmap, va);
963 	opa = pmap_pte_pa(pte);
964 #ifdef DEBUG
965 	if (pmapdebug & PDB_ENTER)
966 		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
967 #endif
968 
969 	/*
970 	 * Mapping has not changed, must be protection or wiring change.
971 	 */
972 	if (opa == pa) {
973 #ifdef PMAPSTATS
974 		enter_stats.pwchange++;
975 #endif
976 		/*
977 		 * Wiring change, just update stats.
978 		 * We don't worry about wiring PT pages as they remain
979 		 * resident as long as there are valid mappings in them.
980 		 * Hence, if a user page is wired, the PT page will be also.
981 		 */
982 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
983 #ifdef DEBUG
984 			if (pmapdebug & PDB_ENTER)
985 				printf("enter: wiring change -> %x\n", wired);
986 #endif
987 			if (wired)
988 				pmap->pm_stats.wired_count++;
989 			else
990 				pmap->pm_stats.wired_count--;
991 #ifdef PMAPSTATS
992 			if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
993 				enter_stats.wchange++;
994 #endif
995 		}
996 #ifdef PMAPSTATS
997 		else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
998 			enter_stats.pchange++;
999 		else
1000 			enter_stats.nochange++;
1001 #endif
1002 		/*
1003 		 * Retain cache inhibition status
1004 		 */
1005 		checkpv = FALSE;
1006 		if (pmap_pte_ci(pte))
1007 			cacheable = FALSE;
1008 		goto validate;
1009 	}
1010 
1011 	/*
1012 	 * Mapping has changed, invalidate old range and fall through to
1013 	 * handle validating new mapping.
1014 	 */
1015 	if (opa) {
1016 #ifdef DEBUG
1017 		if (pmapdebug & PDB_ENTER)
1018 			printf("enter: removing old mapping %x\n", va);
1019 #endif
1020 		pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
1021 #ifdef PMAPSTATS
1022 		enter_stats.mchange++;
1023 #endif
1024 	}
1025 
1026 	/*
1027 	 * If this is a new user mapping, increment the wiring count
1028 	 * on this PT page.  PT pages are wired down as long as there
1029 	 * is a valid mapping in the page.
1030 	 */
1031 	if (pmap != kernel_pmap)
1032 		(void) vm_map_pageable(pt_map, trunc_page(pte),
1033 				       round_page(pte+1), FALSE);
1034 
1035 	/*
1036 	 * Enter on the PV list if part of our managed memory
1037 	 * Note that we raise IPL while manipulating pv_table
1038 	 * since pmap_enter can be called at interrupt time.
1039 	 */
1040 	if (pa >= vm_first_phys && pa < vm_last_phys) {
1041 		register pv_entry_t pv, npv;
1042 		int s;
1043 
1044 #ifdef PMAPSTATS
1045 		enter_stats.managed++;
1046 #endif
1047 		pv = pa_to_pvh(pa);
1048 		s = splimp();
1049 #ifdef DEBUG
1050 		if (pmapdebug & PDB_ENTER)
1051 			printf("enter: pv at %x: %x/%x/%x\n",
1052 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
1053 #endif
1054 		/*
1055 		 * No entries yet, use header as the first entry
1056 		 */
1057 		if (pv->pv_pmap == NULL) {
1058 #ifdef PMAPSTATS
1059 			enter_stats.firstpv++;
1060 #endif
1061 			pv->pv_va = va;
1062 			pv->pv_pmap = pmap;
1063 			pv->pv_next = NULL;
1064 			pv->pv_ptste = NULL;
1065 			pv->pv_ptpmap = NULL;
1066 			pv->pv_flags = 0;
1067 		}
1068 		/*
1069 		 * There is at least one other VA mapping this page.
1070 		 * Place this entry after the header.
1071 		 */
1072 		else {
1073 #ifdef DEBUG
1074 			for (npv = pv; npv; npv = npv->pv_next)
1075 				if (pmap == npv->pv_pmap && va == npv->pv_va)
1076 					panic("pmap_enter: already in pv_tab");
1077 #endif
1078 			npv = (pv_entry_t)
1079 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
1080 			npv->pv_va = va;
1081 			npv->pv_pmap = pmap;
1082 			npv->pv_next = pv->pv_next;
1083 			npv->pv_ptste = NULL;
1084 			npv->pv_ptpmap = NULL;
1085 			npv->pv_flags = 0;
1086 			pv->pv_next = npv;
1087 #ifdef PMAPSTATS
1088 			if (!npv->pv_next)
1089 				enter_stats.secondpv++;
1090 #endif
1091 #ifdef HAVEVAC
1092 			/*
1093 			 * Since there is another logical mapping for the
1094 			 * same page we may need to cache-inhibit the
1095 			 * descriptors on those CPUs with external VACs.
1096 			 * We don't need to CI if:
1097 			 *
1098 			 * - No two mappings belong to the same user pmap.
1099 			 *   Since the cache is flushed on context switches
1100 			 *   there is no problem between user processes.
1101 			 *
1102 			 * - Mappings within a single pmap are a certain
1103 			 *   magic distance apart.  VAs at these appropriate
1104 			 *   boundaries map to the same cache entries or
1105 			 *   otherwise don't conflict.
1106 			 *
1107 			 * To keep it simple, we only check for these special
1108 			 * cases if there are only two mappings, otherwise we
1109 			 * punt and always CI.
1110 			 *
1111 			 * Note that there are no aliasing problems with the
1112 			 * on-chip data-cache when the WA bit is set.
1113 			 */
1114 			if (pmap_aliasmask) {
1115 				if (pv->pv_flags & PV_CI) {
1116 #ifdef DEBUG
1117 					if (pmapdebug & PDB_CACHE)
1118 					printf("enter: pa %x already CI'ed\n",
1119 					       pa);
1120 #endif
1121 					checkpv = cacheable = FALSE;
1122 				} else if (npv->pv_next ||
1123 					   ((pmap == pv->pv_pmap ||
1124 					     pmap == kernel_pmap ||
1125 					     pv->pv_pmap == kernel_pmap) &&
1126 					    ((pv->pv_va & pmap_aliasmask) !=
1127 					     (va & pmap_aliasmask)))) {
1128 #ifdef DEBUG
1129 					if (pmapdebug & PDB_CACHE)
1130 					printf("enter: pa %x CI'ing all\n",
1131 					       pa);
1132 #endif
1133 					cacheable = FALSE;
1134 					pv->pv_flags |= PV_CI;
1135 #ifdef PMAPSTATS
1136 					enter_stats.ci++;
1137 #endif
1138 				}
1139 			}
1140 #endif
1141 		}
1142 		splx(s);
1143 	}
1144 	/*
1145 	 * Assumption: if it is not part of our managed memory
1146 	 * then it must be device memory which may be volatile.
1147 	 */
1148 	else if (pmap_initialized) {
1149 		checkpv = cacheable = FALSE;
1150 #ifdef PMAPSTATS
1151 		enter_stats.unmanaged++;
1152 #endif
1153 	}
1154 
1155 	/*
1156 	 * Increment counters
1157 	 */
1158 	pmap->pm_stats.resident_count++;
1159 	if (wired)
1160 		pmap->pm_stats.wired_count++;
1161 
1162 validate:
1163 #ifdef HAVEVAC
1164 	/*
1165 	 * Purge kernel side of VAC to ensure we get correct state
1166 	 * of HW bits so we don't clobber them.
1167 	 */
1168 	if (pmap_aliasmask)
1169 		DCIS();
1170 #endif
1171 	/*
1172 	 * Build the new PTE.
1173 	 */
1174 	npte = pa | pte_prot(pmap, prot) | (*(int *)pte & (PG_M|PG_U)) | PG_V;
1175 	if (wired)
1176 		npte |= PG_W;
1177 	if (!checkpv && !cacheable)
1178 		npte |= PG_CI;
1179 #if defined(HP380)
1180 	if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1181 #ifdef DEBUG
1182 		if (dowriteback && (dokwriteback || pmap != kernel_pmap))
1183 #endif
1184 		npte |= PG_CCB;
1185 #endif
1186 #ifdef DEBUG
1187 	if (pmapdebug & PDB_ENTER)
1188 		printf("enter: new pte value %x\n", npte);
1189 #endif
1190 	/*
1191 	 * Remember if this was a wiring-only change.
1192 	 * If so, we need not flush the TLB and caches.
1193 	 */
1194 	wired = ((*(int *)pte ^ npte) == PG_W);
1195 #if defined(HP380)
1196 	if (mmutype == MMU_68040 && !wired) {
1197 		DCFP(pa);
1198 		ICPP(pa);
1199 	}
1200 #endif
1201 	*(int *)pte = npte;
1202 	if (!wired && active_pmap(pmap))
1203 		TBIS(va);
1204 #ifdef HAVEVAC
1205 	/*
1206 	 * The following is executed if we are entering a second
1207 	 * (or greater) mapping for a physical page and the mappings
1208 	 * may create an aliasing problem.  In this case we must
1209 	 * cache inhibit the descriptors involved and flush any
1210 	 * external VAC.
1211 	 */
1212 	if (checkpv && !cacheable) {
1213 		pmap_changebit(pa, PG_CI, TRUE);
1214 		DCIA();
1215 #ifdef PMAPSTATS
1216 		enter_stats.flushes++;
1217 #endif
1218 #ifdef DEBUG
1219 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1220 		    (PDB_CACHE|PDB_PVDUMP))
1221 			pmap_pvdump(pa);
1222 #endif
1223 	}
1224 #ifdef DEBUG
1225 	else if (pmapvacflush & PVF_ENTER) {
1226 		if (pmapvacflush & PVF_TOTAL)
1227 			DCIA();
1228 		else if (pmap == kernel_pmap)
1229 			DCIS();
1230 		else
1231 			DCIU();
1232 	}
1233 #endif
1234 #endif
1235 #ifdef DEBUG
1236 	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
1237 		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
1238 #endif
1239 }
1240 
1241 /*
1242  *	Routine:	pmap_change_wiring
1243  *	Function:	Change the wiring attribute for a map/virtual-address
1244  *			pair.
1245  *	In/out conditions:
1246  *			The mapping must already exist in the pmap.
1247  */
1248 void
1249 pmap_change_wiring(pmap, va, wired)
1250 	register pmap_t	pmap;
1251 	vm_offset_t	va;
1252 	boolean_t	wired;
1253 {
1254 	register pt_entry_t *pte;
1255 
1256 #ifdef DEBUG
1257 	if (pmapdebug & PDB_FOLLOW)
1258 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1259 #endif
1260 	if (pmap == NULL)
1261 		return;
1262 
1263 	pte = pmap_pte(pmap, va);
1264 #ifdef DEBUG
1265 	/*
1266 	 * Page table page is not allocated.
1267 	 * Should this ever happen?  Ignore it for now,
1268 	 * we don't want to force allocation of unnecessary PTE pages.
1269 	 */
1270 	if (!pmap_ste_v(pmap, va)) {
1271 		if (pmapdebug & PDB_PARANOIA)
1272 			printf("pmap_change_wiring: invalid STE for %x\n", va);
1273 		return;
1274 	}
1275 	/*
1276 	 * Page not valid.  Should this ever happen?
1277 	 * Just continue and change wiring anyway.
1278 	 */
1279 	if (!pmap_pte_v(pte)) {
1280 		if (pmapdebug & PDB_PARANOIA)
1281 			printf("pmap_change_wiring: invalid PTE for %x\n", va);
1282 	}
1283 #endif
1284 	/*
1285 	 * If wiring actually changed (always?) set the wire bit and
1286 	 * update the wire count.  Note that wiring is not a hardware
1287 	 * characteristic so there is no need to invalidate the TLB.
1288 	 */
1289 	if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
1290 		pmap_pte_set_w(pte, wired);
1291 		if (wired)
1292 			pmap->pm_stats.wired_count++;
1293 		else
1294 			pmap->pm_stats.wired_count--;
1295 	}
1296 }
1297 
1298 /*
1299  *	Routine:	pmap_extract
1300  *	Function:
1301  *		Extract the physical page address associated
1302  *		with the given map/virtual_address pair.
1303  */
1304 
1305 vm_offset_t
1306 pmap_extract(pmap, va)
1307 	register pmap_t	pmap;
1308 	vm_offset_t va;
1309 {
1310 	register vm_offset_t pa;
1311 
1312 #ifdef DEBUG
1313 	if (pmapdebug & PDB_FOLLOW)
1314 		printf("pmap_extract(%x, %x) -> ", pmap, va);
1315 #endif
1316 	pa = 0;
1317 	if (pmap && pmap_ste_v(pmap, va))
1318 		pa = *(int *)pmap_pte(pmap, va);
1319 	if (pa)
1320 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1321 #ifdef DEBUG
1322 	if (pmapdebug & PDB_FOLLOW)
1323 		printf("%x\n", pa);
1324 #endif
1325 	return(pa);
1326 }
1327 
1328 /*
1329  *	Copy the range specified by src_addr/len
1330  *	from the source map to the range dst_addr/len
1331  *	in the destination map.
1332  *
1333  *	This routine is only advisory and need not do anything.
1334  */
1335 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1336 	pmap_t		dst_pmap;
1337 	pmap_t		src_pmap;
1338 	vm_offset_t	dst_addr;
1339 	vm_size_t	len;
1340 	vm_offset_t	src_addr;
1341 {
1342 #ifdef DEBUG
1343 	if (pmapdebug & PDB_FOLLOW)
1344 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1345 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1346 #endif
1347 }
1348 
1349 /*
1350  *	Require that all active physical maps contain no
1351  *	incorrect entries NOW.  [This update includes
1352  *	forcing updates of any address map caching.]
1353  *
1354  *	Generally used to ensure that a thread about
1355  *	to run will see a semantically correct world.
1356  */
1357 void pmap_update()
1358 {
1359 #ifdef DEBUG
1360 	if (pmapdebug & PDB_FOLLOW)
1361 		printf("pmap_update()\n");
1362 #endif
1363 	TBIA();
1364 }
1365 
1366 /*
1367  *	Routine:	pmap_collect
1368  *	Function:
1369  *		Garbage collects the physical map system for
1370  *		pages which are no longer used.
1371  *		Success need not be guaranteed -- that is, there
1372  *		may well be pages which are not referenced, but
1373  *		others may be collected.
1374  *	Usage:
1375  *		Called by the pageout daemon when pages are scarce.
1376  */
1377 void
1378 pmap_collect(pmap)
1379 	pmap_t		pmap;
1380 {
1381 	register vm_offset_t pa;
1382 	register pv_entry_t pv;
1383 	register int *pte;
1384 	vm_offset_t kpa;
1385 	int s;
1386 
1387 #ifdef DEBUG
1388 	int *ste;
1389 	int opmapdebug;
1390 #endif
1391 	if (pmap != kernel_pmap)
1392 		return;
1393 
1394 #ifdef DEBUG
1395 	if (pmapdebug & PDB_FOLLOW)
1396 		printf("pmap_collect(%x)\n", pmap);
1397 #endif
1398 #ifdef PMAPSTATS
1399 	kpt_stats.collectscans++;
1400 #endif
1401 	s = splimp();
1402 	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
1403 		register struct kpt_page *kpt, **pkpt;
1404 
1405 		/*
1406 		 * Locate physical pages which are being used as kernel
1407 		 * page table pages.
1408 		 */
1409 		pv = pa_to_pvh(pa);
1410 		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
1411 			continue;
1412 		do {
1413 			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
1414 				break;
1415 		} while (pv = pv->pv_next);
1416 		if (pv == NULL)
1417 			continue;
1418 #ifdef DEBUG
1419 		if (pv->pv_va < (vm_offset_t)Sysmap ||
1420 		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
1421 			printf("collect: kernel PT VA out of range\n");
1422 		else
1423 			goto ok;
1424 		pmap_pvdump(pa);
1425 		continue;
1426 ok:
1427 #endif
1428 		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
1429 		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
1430 			;
1431 		if (pte >= (int *)pv->pv_va)
1432 			continue;
1433 
1434 #ifdef DEBUG
1435 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1436 			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
1437 			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
1438 			opmapdebug = pmapdebug;
1439 			pmapdebug |= PDB_PTPAGE;
1440 		}
1441 
1442 		ste = (int *)pv->pv_ptste;
1443 #endif
1444 		/*
1445 		 * If all entries were invalid we can remove the page.
1446 		 * We call pmap_remove_mapping to take care of invalidating
1447 		 * ST and Sysptmap entries.
1448 		 */
1449 		kpa = pmap_extract(pmap, pv->pv_va);
1450 		pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
1451 				    PRM_TFLUSH|PRM_CFLUSH);
1452 		/*
1453 		 * Use the physical address to locate the original
1454 		 * (kmem_alloc assigned) address for the page and put
1455 		 * that page back on the free list.
1456 		 */
1457 		for (pkpt = &kpt_used_list, kpt = *pkpt;
1458 		     kpt != (struct kpt_page *)0;
1459 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
1460 			if (kpt->kpt_pa == kpa)
1461 				break;
1462 #ifdef DEBUG
1463 		if (kpt == (struct kpt_page *)0)
1464 			panic("pmap_collect: lost a KPT page");
1465 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1466 			printf("collect: %x (%x) to free list\n",
1467 			       kpt->kpt_va, kpa);
1468 #endif
1469 		*pkpt = kpt->kpt_next;
1470 		kpt->kpt_next = kpt_free_list;
1471 		kpt_free_list = kpt;
1472 #ifdef PMAPSTATS
1473 		kpt_stats.kptinuse--;
1474 		kpt_stats.collectpages++;
1475 #endif
1476 #ifdef DEBUG
1477 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1478 			pmapdebug = opmapdebug;
1479 
1480 		if (*ste)
1481 			printf("collect: kernel STE at %x still valid (%x)\n",
1482 			       ste, *ste);
1483 		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
1484 		if (*ste)
1485 			printf("collect: kernel PTmap at %x still valid (%x)\n",
1486 			       ste, *ste);
1487 #endif
1488 	}
1489 	splx(s);
1490 }
1491 
1492 void
1493 pmap_activate(pmap, pcbp)
1494 	register pmap_t pmap;
1495 	struct pcb *pcbp;
1496 {
1497 #ifdef DEBUG
1498 	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
1499 		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
1500 #endif
1501 	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
1502 }
1503 
1504 /*
1505  *	pmap_zero_page zeros the specified (machine independent)
1506  *	page by mapping the page into virtual memory and using
1507  *	bzero to clear its contents, one machine dependent page
1508  *	at a time.
1509  *
1510  *	XXX this is a bad implementation for virtual cache machines
1511  *	(320/350) because pmap_enter doesn't cache-inhibit the temporary
1512  *	kernel mapping and we wind up with data cached for that KVA.
1513  *	It is probably a win for physical cache machines (370/380)
1514  *	as the cache loading is not wasted.
1515  */
1516 void
1517 pmap_zero_page(phys)
1518 	vm_offset_t phys;
1519 {
1520 	register vm_offset_t kva;
1521 	extern caddr_t CADDR1;
1522 
1523 #ifdef DEBUG
1524 	if (pmapdebug & PDB_FOLLOW)
1525 		printf("pmap_zero_page(%x)\n", phys);
1526 #endif
1527 	kva = (vm_offset_t) CADDR1;
1528 	pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1529 	bzero((caddr_t)kva, HP_PAGE_SIZE);
1530 	pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
1531 			    PRM_TFLUSH|PRM_CFLUSH);
1532 }
1533 
1534 /*
1535  *	pmap_copy_page copies the specified (machine independent)
1536  *	page by mapping the page into virtual memory and using
1537  *	bcopy to copy the page, one machine dependent page at a
1538  *	time.
1539  *
1540  *
1541  *	XXX this is a bad implementation for virtual cache machines
1542  *	(320/350) because pmap_enter doesn't cache-inhibit the temporary
1543  *	kernel mapping and we wind up with data cached for that KVA.
1544  *	It is probably a win for physical cache machines (370/380)
1545  *	as the cache loading is not wasted.
1546  */
1547 void
1548 pmap_copy_page(src, dst)
1549 	vm_offset_t src, dst;
1550 {
1551 	register vm_offset_t skva, dkva;
1552 	extern caddr_t CADDR1, CADDR2;
1553 
1554 #ifdef DEBUG
1555 	if (pmapdebug & PDB_FOLLOW)
1556 		printf("pmap_copy_page(%x, %x)\n", src, dst);
1557 #endif
1558 	skva = (vm_offset_t) CADDR1;
1559 	dkva = (vm_offset_t) CADDR2;
1560 	pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
1561 	pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
1562 	copypage((caddr_t)skva, (caddr_t)dkva);
1563 	/* CADDR1 and CADDR2 are virtually contiguous */
1564 	pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
1565 }
1566 
1567 /*
1568  *	Routine:	pmap_pageable
1569  *	Function:
1570  *		Make the specified pages (by pmap, offset)
1571  *		pageable (or not) as requested.
1572  *
1573  *		A page which is not pageable may not take
1574  *		a fault; therefore, its page table entry
1575  *		must remain valid for the duration.
1576  *
1577  *		This routine is merely advisory; pmap_enter
1578  *		will specify that these pages are to be wired
1579  *		down (or not) as appropriate.
1580  */
1581 void
1582 pmap_pageable(pmap, sva, eva, pageable)
1583 	pmap_t		pmap;
1584 	vm_offset_t	sva, eva;
1585 	boolean_t	pageable;
1586 {
1587 #ifdef DEBUG
1588 	if (pmapdebug & PDB_FOLLOW)
1589 		printf("pmap_pageable(%x, %x, %x, %x)\n",
1590 		       pmap, sva, eva, pageable);
1591 #endif
1592 	/*
1593 	 * If we are making a PT page pageable then all valid
1594 	 * mappings must be gone from that page.  Hence it should
1595 	 * be all zeros and there is no need to clean it.
1596 	 * Assumptions:
1597 	 *	- we are called with only one page at a time
1598 	 *	- PT pages have only one pv_table entry
1599 	 */
1600 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1601 		register pv_entry_t pv;
1602 		register vm_offset_t pa;
1603 
1604 #ifdef DEBUG
1605 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1606 			printf("pmap_pageable(%x, %x, %x, %x)\n",
1607 			       pmap, sva, eva, pageable);
1608 #endif
1609 		if (!pmap_ste_v(pmap, sva))
1610 			return;
1611 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
1612 		if (pa < vm_first_phys || pa >= vm_last_phys)
1613 			return;
1614 		pv = pa_to_pvh(pa);
1615 		if (pv->pv_ptste == NULL)
1616 			return;
1617 #ifdef DEBUG
1618 		if (pv->pv_va != sva || pv->pv_next) {
1619 			printf("pmap_pageable: bad PT page va %x next %x\n",
1620 			       pv->pv_va, pv->pv_next);
1621 			return;
1622 		}
1623 #endif
1624 		/*
1625 		 * Mark it unmodified to avoid pageout
1626 		 */
1627 		pmap_changebit(pa, PG_M, FALSE);
1628 #ifdef DEBUG
1629 		if (pmapdebug & PDB_PTPAGE)
1630 			printf("pmap_pageable: PT page %x(%x) unmodified\n",
1631 			       sva, *(int *)pmap_pte(pmap, sva));
1632 		if (pmapdebug & PDB_WIRING)
1633 			pmap_check_wiring("pageable", sva);
1634 #endif
1635 	}
1636 }
1637 
1638 /*
1639  *	Clear the modify bits on the specified physical page.
1640  */
1641 
1642 void
1643 pmap_clear_modify(pa)
1644 	vm_offset_t	pa;
1645 {
1646 #ifdef DEBUG
1647 	if (pmapdebug & PDB_FOLLOW)
1648 		printf("pmap_clear_modify(%x)\n", pa);
1649 #endif
1650 	pmap_changebit(pa, PG_M, FALSE);
1651 }
1652 
1653 /*
1654  *	pmap_clear_reference:
1655  *
1656  *	Clear the reference bit on the specified physical page.
1657  */
1658 
1659 void pmap_clear_reference(pa)
1660 	vm_offset_t	pa;
1661 {
1662 #ifdef DEBUG
1663 	if (pmapdebug & PDB_FOLLOW)
1664 		printf("pmap_clear_reference(%x)\n", pa);
1665 #endif
1666 	pmap_changebit(pa, PG_U, FALSE);
1667 }
1668 
1669 /*
1670  *	pmap_is_referenced:
1671  *
1672  *	Return whether or not the specified physical page is referenced
1673  *	by any physical maps.
1674  */
1675 
1676 boolean_t
1677 pmap_is_referenced(pa)
1678 	vm_offset_t	pa;
1679 {
1680 #ifdef DEBUG
1681 	if (pmapdebug & PDB_FOLLOW) {
1682 		boolean_t rv = pmap_testbit(pa, PG_U);
1683 		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
1684 		return(rv);
1685 	}
1686 #endif
1687 	return(pmap_testbit(pa, PG_U));
1688 }
1689 
1690 /*
1691  *	pmap_is_modified:
1692  *
1693  *	Return whether or not the specified physical page is modified
1694  *	by any physical maps.
1695  */
1696 
1697 boolean_t
1698 pmap_is_modified(pa)
1699 	vm_offset_t	pa;
1700 {
1701 #ifdef DEBUG
1702 	if (pmapdebug & PDB_FOLLOW) {
1703 		boolean_t rv = pmap_testbit(pa, PG_M);
1704 		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
1705 		return(rv);
1706 	}
1707 #endif
1708 	return(pmap_testbit(pa, PG_M));
1709 }
1710 
1711 vm_offset_t
1712 pmap_phys_address(ppn)
1713 	int ppn;
1714 {
1715 	return(hp300_ptob(ppn));
1716 }
1717 
1718 #ifdef HPUXCOMPAT
1719 /*
1720  * 'PUX hack for dealing with the so called multi-mapped address space.
1721  * The first 256mb is mapped in at every 256mb region from 0x10000000
1722  * up to 0xF0000000.  This allows for 15 bits of tag information.
1723  * up to 0xF0000000.  This allows the upper four address bits to carry one of 15 tag values.
1724  * We implement this at the segment table level, the machine independent
1725  * VM knows nothing about it.
1726  */
1727 pmap_mapmulti(pmap, va)
1728 	pmap_t pmap;
1729 	vm_offset_t va;
1730 {
1731 	int *ste, *bste;
1732 
1733 #ifdef DEBUG
1734 	if (pmapdebug & PDB_MULTIMAP) {
1735 		ste = (int *)pmap_ste(pmap, HPMMBASEADDR(va));
1736 		printf("pmap_mapmulti(%x, %x): bste %x(%x)",
1737 		       pmap, va, ste, *ste);
1738 		ste = (int *)pmap_ste(pmap, va);
1739 		printf(" ste %x(%x)\n", ste, *ste);
1740 	}
1741 #endif
1742 	bste = (int *) pmap_ste(pmap, HPMMBASEADDR(va));
1743 	ste = (int *) pmap_ste(pmap, va);
1744 	if (*ste == SG_NV && (*bste & SG_V)) {
1745 		*ste = *bste;
1746 		TBIAU();
1747 		return (KERN_SUCCESS);
1748 	}
1749 	return (KERN_INVALID_ADDRESS);
1750 }
1751 #endif
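
/*
 * Editorial example, assuming HPMMBASEADDR() simply strips the 256mb
 * region tag held in the upper four address bits: a fault on a
 * multi-mapped address such as 0x30001000 is resolved against base
 * address 0x00001000.  If the segment containing the base address is
 * valid, pmap_mapmulti() copies its STE into the slot for the aliased
 * segment and flushes the user TLB, so both aliases reach the same
 * page table from then on; otherwise the caller gets
 * KERN_INVALID_ADDRESS and the fault is handled normally.
 */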
1752 
1753 /*
1754  * Miscellaneous support routines follow
1755  */
1756 
1757 /*
1758  * Invalidate a single page denoted by pmap/va.
1759  * If (pte != NULL), it is the already computed PTE for the page.
1760  * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
1761  * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
1762  */
1763 /* static */
1764 void
1765 pmap_remove_mapping(pmap, va, pte, flags)
1766 	register pmap_t pmap;
1767 	register vm_offset_t va;
1768 	register pt_entry_t *pte;
1769 	int flags;
1770 {
1771 	register vm_offset_t pa;
1772 	register pv_entry_t pv, npv;
1773 	pmap_t ptpmap;
1774 	int *ste, s, bits;
1775 #ifdef DEBUG
1776 	pt_entry_t opte;
1777 
1778 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
1779 		printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
1780 		       pmap, va, pte, flags);
1781 #endif
1782 
1783 	/*
1784 	 * PTE not provided, compute it from pmap and va.
1785 	 */
1786 	if (pte == PT_ENTRY_NULL) {
1787 		pte = pmap_pte(pmap, va);
1788 		if (*(int *)pte == PG_NV)
1789 			return;
1790 	}
1791 #ifdef HAVEVAC
1792 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
1793 		/*
1794 		 * Purge kernel side of VAC to ensure we get the correct
1795 		 * state of any hardware maintained bits.
1796 		 */
1797 		DCIS();
1798 #ifdef PMAPSTATS
1799 		remove_stats.sflushes++;
1800 #endif
1801 		/*
1802 		 * If this is a non-CI user mapping for the current process,
1803 		 * flush the VAC.  Note that the kernel side was flushed
1804 		 * above so we don't worry about non-CI kernel mappings.
1805 		 */
1806 		if (pmap == curproc->p_vmspace->vm_map.pmap &&
1807 		    !pmap_pte_ci(pte)) {
1808 			DCIU();
1809 #ifdef PMAPSTATS
1810 			remove_stats.uflushes++;
1811 #endif
1812 		}
1813 	}
1814 #endif
1815 	pa = pmap_pte_pa(pte);
1816 #ifdef DEBUG
1817 	opte = *pte;
1818 #endif
1819 #ifdef PMAPSTATS
1820 	remove_stats.removes++;
1821 #endif
1822 	/*
1823 	 * Update statistics
1824 	 */
1825 	if (pmap_pte_w(pte))
1826 		pmap->pm_stats.wired_count--;
1827 	pmap->pm_stats.resident_count--;
1828 
1829 	/*
1830 	 * Invalidate the PTE after saving the reference modify info.
1831 	 */
1832 #ifdef DEBUG
1833 	if (pmapdebug & PDB_REMOVE)
1834 		printf("remove: invalidating pte at %x\n", pte);
1835 #endif
1836 	bits = *(int *)pte & (PG_U|PG_M);
1837 	*(int *)pte = PG_NV;
1838 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
1839 		TBIS(va);
1840 	/*
1841 	 * For user mappings decrement the wiring count on
1842 	 * the PT page.  We do this after the PTE has been
1843 	 * invalidated because vm_map_pageable winds up in
1844 	 * pmap_pageable which clears the modify bit for the
1845 	 * PT page.
1846 	 */
1847 	if (pmap != kernel_pmap) {
1848 		(void) vm_map_pageable(pt_map, trunc_page(pte),
1849 				       round_page(pte+1), TRUE);
1850 #ifdef DEBUG
1851 		if (pmapdebug & PDB_WIRING)
1852 			pmap_check_wiring("remove", trunc_page(pte));
1853 #endif
1854 	}
1855 	/*
1856 	 * If this isn't a managed page, we are all done.
1857 	 */
1858 	if (pa < vm_first_phys || pa >= vm_last_phys)
1859 		return;
1860 	/*
1861 	 * Otherwise remove it from the PV table
1862 	 * (raise IPL since we may be called at interrupt time).
1863 	 */
1864 	pv = pa_to_pvh(pa);
1865 	ste = (int *)0;
1866 	s = splimp();
1867 	/*
1868 	 * If it is the first entry on the list, it is actually
1869 	 * in the header and we must copy the following entry up
1870 	 * to the header.  Otherwise we must search the list for
1871 	 * the entry.  In either case we free the now unused entry.
1872 	 */
1873 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
1874 		ste = (int *)pv->pv_ptste;
1875 		ptpmap = pv->pv_ptpmap;
1876 		npv = pv->pv_next;
1877 		if (npv) {
1878 			npv->pv_flags = pv->pv_flags;
1879 			*pv = *npv;
1880 			free((caddr_t)npv, M_VMPVENT);
1881 		} else
1882 			pv->pv_pmap = NULL;
1883 #ifdef PMAPSTATS
1884 		remove_stats.pvfirst++;
1885 #endif
1886 	} else {
1887 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1888 #ifdef PMAPSTATS
1889 			remove_stats.pvsearch++;
1890 #endif
1891 			if (pmap == npv->pv_pmap && va == npv->pv_va)
1892 				break;
1893 			pv = npv;
1894 		}
1895 #ifdef DEBUG
1896 		if (npv == NULL)
1897 			panic("pmap_remove: PA not in pv_tab");
1898 #endif
1899 		ste = (int *)npv->pv_ptste;
1900 		ptpmap = npv->pv_ptpmap;
1901 		pv->pv_next = npv->pv_next;
1902 		free((caddr_t)npv, M_VMPVENT);
1903 		pv = pa_to_pvh(pa);
1904 	}
1905 #ifdef HAVEVAC
1906 	/*
1907 	 * If only one mapping left we no longer need to cache inhibit
1908 	 */
1909 	if (pmap_aliasmask &&
1910 	    pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
1911 #ifdef DEBUG
1912 		if (pmapdebug & PDB_CACHE)
1913 			printf("remove: clearing CI for pa %x\n", pa);
1914 #endif
1915 		pv->pv_flags &= ~PV_CI;
1916 		pmap_changebit(pa, PG_CI, FALSE);
1917 #ifdef DEBUG
1918 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1919 		    (PDB_CACHE|PDB_PVDUMP))
1920 			pmap_pvdump(pa);
1921 #endif
1922 	}
1923 #endif
1924 	/*
1925 	 * If this was a PT page we must also remove the
1926 	 * mapping from the associated segment table.
1927 	 */
1928 	if (ste) {
1929 #ifdef PMAPSTATS
1930 		remove_stats.ptinvalid++;
1931 #endif
1932 #ifdef DEBUG
1933 		if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
1934 			printf("remove: ste was %x@%x pte was %x@%x\n",
1935 			       *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
1936 #endif
1937 #if defined(HP380)
1938 		if (mmutype == MMU_68040) {
1939 			int *este = &ste[NPTEPG/SG4_LEV3SIZE];
1940 
1941 			while (ste < este)
1942 				*ste++ = SG_NV;
1943 #ifdef DEBUG
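			/*
			 * Back `ste' up to the start of the chunk just
			 * invalidated so the DEBUG checks below see the
			 * original address.
			 */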
1944 			ste -= NPTEPG/SG4_LEV3SIZE;
1945 #endif
1946 		} else
1947 #endif
1948 		*ste = SG_NV;
1949 		/*
1950 		 * If it was a user PT page, we decrement the
1951 		 * reference count on the segment table as well,
1952 		 * freeing it if it is now empty.
1953 		 */
1954 		if (ptpmap != kernel_pmap) {
1955 #ifdef DEBUG
1956 			if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
1957 				printf("remove: stab %x, refcnt %d\n",
1958 				       ptpmap->pm_stab, ptpmap->pm_sref - 1);
1959 			if ((pmapdebug & PDB_PARANOIA) &&
1960 			    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
1961 				panic("remove: bogus ste");
1962 #endif
1963 			if (--(ptpmap->pm_sref) == 0) {
1964 #ifdef DEBUG
1965 				if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
1966 					printf("remove: free stab %x\n",
1967 					       ptpmap->pm_stab);
1968 #endif
1969 				kmem_free(kernel_map,
1970 					  (vm_offset_t)ptpmap->pm_stab,
1971 					  HP_STSIZE);
1972 				ptpmap->pm_stab = Segtabzero;
1973 				ptpmap->pm_stpa = Segtabzeropa;
1974 #if defined(HP380)
1975 				if (mmutype == MMU_68040)
1976 					ptpmap->pm_stfree = protostfree;
1977 #endif
1978 				ptpmap->pm_stchanged = TRUE;
1979 				/*
1980 				 * XXX may have changed segment table
1981 				 * pointer for current process so
1982 				 * update now to reload hardware.
1983 				 */
1984 				if (ptpmap == curproc->p_vmspace->vm_map.pmap)
1985 					PMAP_ACTIVATE(ptpmap,
1986 					    (struct pcb *)curproc->p_addr, 1);
1987 			}
1988 		}
1989 #if 0
1990 		/*
1991 		 * XXX this should be unnecessary as we have been
1992 		 * flushing individual mappings as we go.
1993 		 */
1994 		if (ptpmap == kernel_pmap)
1995 			TBIAS();
1996 		else
1997 			TBIAU();
1998 #endif
1999 		pv->pv_flags &= ~PV_PTPAGE;
2000 		ptpmap->pm_ptpages--;
2001 	}
2002 	/*
2003 	 * Update saved attributes for managed page
2004 	 */
2005 	pmap_attributes[pa_index(pa)] |= bits;
2006 	splx(s);
2007 }
2008 
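/*
 * Test whether the given PTE bit (e.g. PG_U or PG_M) is set for the
 * managed page `pa', either in the saved attribute array or in any
 * PTE currently mapping the page.
 */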
2009 /* static */
2010 boolean_t
2011 pmap_testbit(pa, bit)
2012 	register vm_offset_t pa;
2013 	int bit;
2014 {
2015 	register pv_entry_t pv;
2016 	register int *pte;
2017 	int s;
2018 
2019 	if (pa < vm_first_phys || pa >= vm_last_phys)
2020 		return(FALSE);
2021 
2022 	pv = pa_to_pvh(pa);
2023 	s = splimp();
2024 	/*
2025 	 * Check saved info first
2026 	 */
2027 	if (pmap_attributes[pa_index(pa)] & bit) {
2028 		splx(s);
2029 		return(TRUE);
2030 	}
2031 #ifdef HAVEVAC
2032 	/*
2033 	 * Flush VAC to get correct state of any hardware maintained bits.
2034 	 */
2035 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
2036 		DCIS();
2037 #endif
2038 	/*
2039 	 * Not found in the saved attributes; check the current mappings,
2040 	 * returning immediately if any of them has the bit set.
2041 	 */
2042 	if (pv->pv_pmap != NULL) {
2043 		for (; pv; pv = pv->pv_next) {
2044 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
2045 			if (*pte & bit) {
2046 				splx(s);
2047 				return(TRUE);
2048 			}
2049 		}
2050 	}
2051 	splx(s);
2052 	return(FALSE);
2053 }
2054 
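/*
 * Set (setem == TRUE) or clear (setem == FALSE) the given PTE bit in
 * every current mapping of the managed page `pa', flushing the TLB
 * entry for any active mapping that actually changes.  When clearing,
 * the corresponding saved attribute bit is cleared as well.
 */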
2055 /* static */
2056 void
2057 pmap_changebit(pa, bit, setem)
2058 	register vm_offset_t pa;
2059 	int bit;
2060 	boolean_t setem;
2061 {
2062 	register pv_entry_t pv;
2063 	register int *pte, npte;
2064 	vm_offset_t va;
2065 	int s;
2066 	boolean_t firstpage = TRUE;
2067 #ifdef PMAPSTATS
2068 	struct chgstats *chgp;
2069 #endif
2070 
2071 #ifdef DEBUG
2072 	if (pmapdebug & PDB_BITS)
2073 		printf("pmap_changebit(%x, %x, %s)\n",
2074 		       pa, bit, setem ? "set" : "clear");
2075 #endif
2076 	if (pa < vm_first_phys || pa >= vm_last_phys)
2077 		return;
2078 
2079 #ifdef PMAPSTATS
2080 	chgp = &changebit_stats[(bit>>2)-1];
2081 	if (setem)
2082 		chgp->setcalls++;
2083 	else
2084 		chgp->clrcalls++;
2085 #endif
2086 	pv = pa_to_pvh(pa);
2087 	s = splimp();
2088 	/*
2089 	 * Clear saved attributes (modify, reference)
2090 	 */
2091 	if (!setem)
2092 		pmap_attributes[pa_index(pa)] &= ~bit;
2093 	/*
2094 	 * Loop over all current mappings, setting/clearing as appropriate.
2095 	 * If setting RO, do we need to clear the VAC?
2096 	 */
2097 	if (pv->pv_pmap != NULL) {
2098 #ifdef DEBUG
2099 		int toflush = 0;
2100 #endif
2101 		for (; pv; pv = pv->pv_next) {
2102 #ifdef DEBUG
2103 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
2104 #endif
2105 			va = pv->pv_va;
2106 
2107 			/*
2108 			 * XXX don't write protect pager mappings
2109 			 */
2110 			if (bit == PG_RO) {
2111 				extern vm_offset_t pager_sva, pager_eva;
2112 
2113 				if (va >= pager_sva && va < pager_eva)
2114 					continue;
2115 			}
2116 
2117 			pte = (int *) pmap_pte(pv->pv_pmap, va);
2118 #ifdef HAVEVAC
2119 			/*
2120 			 * Flush VAC to ensure we get correct state of HW bits
2121 			 * so we don't clobber them.
2122 			 */
2123 			if (firstpage && pmap_aliasmask) {
2124 				firstpage = FALSE;
2125 				DCIS();
2126 			}
2127 #endif
2128 			if (setem)
2129 				npte = *pte | bit;
2130 			else
2131 				npte = *pte & ~bit;
2132 			if (*pte != npte) {
2133 #if defined(HP380)
2134 				/*
2135 				 * If we are changing caching status or
2136 				 * protection, make sure the caches are
2137 				 * flushed (but only once).
2138 				 */
2139 				if (firstpage && mmutype == MMU_68040 &&
2140 				    ((bit == PG_RO && setem) ||
2141 				     (bit & PG_CMASK))) {
2142 					firstpage = FALSE;
2143 					DCFP(pa);
2144 					ICPP(pa);
2145 				}
2146 #endif
2147 				*pte = npte;
2148 				if (active_pmap(pv->pv_pmap))
2149 					TBIS(va);
2150 #ifdef PMAPSTATS
2151 				if (setem)
2152 					chgp->sethits++;
2153 				else
2154 					chgp->clrhits++;
2155 #endif
2156 			}
2157 #ifdef PMAPSTATS
2158 			else {
2159 				if (setem)
2160 					chgp->setmiss++;
2161 				else
2162 					chgp->clrmiss++;
2163 			}
2164 #endif
2165 		}
2166 #if defined(HAVEVAC) && defined(DEBUG)
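		/*
		 * Optionally flush the VAC after write-protecting:
		 * `toflush' is 2 if only kernel mappings were involved,
		 * 1 if only user mappings, 3 if both.
		 */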
2167 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
2168 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
2169 				DCIA();
2170 			else if (toflush == 2)
2171 				DCIS();
2172 			else
2173 				DCIU();
2174 		}
2175 #endif
2176 	}
2177 	splx(s);
2178 }
2179 
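/*
 * Allocate and map a page table page for the segment of `pmap' containing
 * `va', entering it in the segment table and recording the ST entry
 * address in the page's PV entry so the mapping can be torn down later.
 * Kernel PT pages come from the KPT free list; user PT pages are faulted
 * in from pt_map.
 */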
2180 /* static */
2181 void
2182 pmap_enter_ptpage(pmap, va)
2183 	register pmap_t pmap;
2184 	register vm_offset_t va;
2185 {
2186 	register vm_offset_t ptpa;
2187 	register pv_entry_t pv;
2188 	st_entry_t *ste;
2189 	int s;
2190 
2191 #ifdef DEBUG
2192 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
2193 		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
2194 #endif
2195 #ifdef PMAPSTATS
2196 	enter_stats.ptpneeded++;
2197 #endif
2198 	/*
2199 	 * Allocate a segment table if necessary.  Note that it is allocated
2200 	 * from kernel_map and not pt_map.  This keeps user page tables
2201 	 * aligned on segment boundaries in the kernel address space.
2202 	 * The segment table is wired down.  It will be freed whenever the
2203 	 * reference count drops to zero.
2204 	 */
2205 	if (pmap->pm_stab == Segtabzero) {
2206 		pmap->pm_stab = (st_entry_t *)
2207 			kmem_alloc(kernel_map, HP_STSIZE);
2208 		pmap->pm_stpa = (st_entry_t *)
2209 			pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
2210 #if defined(HP380)
2211 		if (mmutype == MMU_68040) {
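			/*
			 * Turn off copyback caching of the new segment
			 * table; pmap_changebit() wants the physical
			 * address (pm_stpa), not the kernel VA.
			 */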
2212 #ifdef DEBUG
2213 			if (dowriteback && dokwriteback)
2214 #endif
2215 			pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
2216 			pmap->pm_stfree = protostfree;
2217 		}
2218 #endif
2219 		pmap->pm_stchanged = TRUE;
2220 		/*
2221 		 * XXX may have changed segment table pointer for current
2222 		 * process so update now to reload hardware.
2223 		 */
2224 		if (pmap == curproc->p_vmspace->vm_map.pmap)
2225 			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
2226 #ifdef DEBUG
2227 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2228 			printf("enter: pmap %x stab %x(%x)\n",
2229 			       pmap, pmap->pm_stab, pmap->pm_stpa);
2230 #endif
2231 	}
2232 
2233 	ste = pmap_ste(pmap, va);
2234 #if defined(HP380)
2235 	/*
2236 	 * Allocate level 2 descriptor block if necessary
2237 	 */
2238 	if (mmutype == MMU_68040) {
2239 		if (!ste->sg_v) {
2240 			int ix;
2241 			caddr_t addr;
2242 
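			/*
			 * pm_stfree is a bitmap of free level 2 blocks in
			 * the 68040 segment table: bmtol2() picks a free
			 * block and l2tobm() converts the index back to a
			 * mask so the block can be marked in use.
			 */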
2243 			ix = bmtol2(pmap->pm_stfree);
2244 			if (ix == -1)
2245 				panic("enter: out of address space"); /* XXX */
2246 			pmap->pm_stfree &= ~l2tobm(ix);
2247 			addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2248 			bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
2249 			addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2250 			*(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
2251 #ifdef DEBUG
2252 			if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2253 				printf("enter: alloc ste2 %d(%x)\n", ix, addr);
2254 #endif
2255 		}
2256 		ste = pmap_ste2(pmap, va);
2257 		/*
2258 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2259 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2260 		 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
2261 		 * PT page--the unit of allocation.  We set `ste' to point
2262 		 * to the first entry of that chunk which is validated in its
2263 		 * entirety below.
2264 		 */
2265 		ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
2266 #ifdef DEBUG
2267 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2268 			printf("enter: ste2 %x (%x)\n",
2269 			       pmap_ste2(pmap, va), ste);
2270 #endif
2271 	}
2272 #endif
2273 	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
2274 
2275 	/*
2276 	 * In the kernel we allocate a page from the kernel PT page
2277 	 * free list and map it into the kernel page table map (via
2278 	 * pmap_enter).
2279 	 */
2280 	if (pmap == kernel_pmap) {
2281 		register struct kpt_page *kpt;
2282 
2283 		s = splimp();
2284 		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
2285 			/*
2286 			 * No PT pages available.
2287 			 * Try once to free up unused ones.
2288 			 */
2289 #ifdef DEBUG
2290 			if (pmapdebug & PDB_COLLECT)
2291 				printf("enter: no KPT pages, collecting...\n");
2292 #endif
2293 			pmap_collect(kernel_pmap);
2294 			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
2295 				panic("pmap_enter_ptpage: can't get KPT page");
2296 		}
2297 #ifdef PMAPSTATS
2298 		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
2299 			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
2300 #endif
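		/*
		 * Move the KPT page from the free list to the used list,
		 * zero it and enter it (wired) into the kernel page table
		 * map.
		 */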
2301 		kpt_free_list = kpt->kpt_next;
2302 		kpt->kpt_next = kpt_used_list;
2303 		kpt_used_list = kpt;
2304 		ptpa = kpt->kpt_pa;
2305 		bzero((caddr_t)kpt->kpt_va, HP_PAGE_SIZE);
2306 		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
2307 #ifdef DEBUG
2308 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2309 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2310 
2311 			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
2312 			       ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
2313 		}
2314 #endif
2315 		splx(s);
2316 	}
2317 	/*
2318 	 * For user processes we just simulate a fault on that location,
2319 	 * letting the VM system allocate a zero-filled page.
2320 	 */
2321 	else {
2322 #ifdef DEBUG
2323 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2324 			printf("enter: about to fault UPT pg at %x\n", va);
2325 #endif
2326 		s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
2327 		if (s != KERN_SUCCESS) {
2328 			printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
2329 			panic("pmap_enter: vm_fault failed");
2330 		}
2331 		ptpa = pmap_extract(kernel_pmap, va);
2332 		/*
2333 		 * Mark the page clean now to avoid its pageout (and
2334 		 * hence creation of a pager) between now and when it
2335 		 * is wired; i.e. while it is on a paging queue.
2336 		 */
2337 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
2338 #ifdef DEBUG
2339 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
2340 #endif
2341 	}
2342 #if defined(HP380)
2343 	/*
2344 	 * Turn off copyback caching of page table pages;
2345 	 * things could get ugly otherwise.
2346 	 */
2347 #ifdef DEBUG
2348 	if (dowriteback && dokwriteback)
2349 #endif
2350 	if (mmutype == MMU_68040) {
2351 		int *pte = (int *)pmap_pte(kernel_pmap, va);
2352 #ifdef DEBUG
2353 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2354 			printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
2355 			       pmap == kernel_pmap ? "Kernel" : "User",
2356 			       va, ptpa, pte, *pte);
2357 #endif
2358 		pmap_changebit(ptpa, PG_CCB, 0);
2359 	}
2360 #endif
2361 	/*
2362 	 * Locate the PV entry in the kernel for this PT page and
2363 	 * record the STE address.  This is so that we can invalidate
2364 	 * the STE when we remove the mapping for the page.
2365 	 */
2366 	pv = pa_to_pvh(ptpa);
2367 	s = splimp();
2368 	if (pv) {
2369 		pv->pv_flags |= PV_PTPAGE;
2370 		do {
2371 			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
2372 				break;
2373 		} while ((pv = pv->pv_next) != NULL);
2374 	}
2375 #ifdef DEBUG
2376 	if (pv == NULL)
2377 		panic("pmap_enter_ptpage: PT page not entered");
2378 #endif
2379 	pv->pv_ptste = ste;
2380 	pv->pv_ptpmap = pmap;
2381 #ifdef DEBUG
2382 	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
2383 		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
2384 #endif
2385 
2386 	/*
2387 	 * Map the new PT page into the segment table.
2388 	 * Also increment the reference count on the segment table if this
2389 	 * was a user page table page.  Note that we don't use vm_map_pageable
2390 	 * to keep the count like we do for PT pages; this is mostly because
2391 	 * it would be difficult to identify ST pages in pmap_pageable to
2392 	 * release them.  We also avoid the overhead of vm_map_pageable.
2393 	 */
2394 #if defined(HP380)
2395 	if (mmutype == MMU_68040) {
2396 		st_entry_t *este;
2397 
2398 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2399 			*(int *)ste = ptpa | SG_U | SG_RW | SG_V;
2400 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2401 		}
2402 	} else
2403 #endif
2404 	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2405 	if (pmap != kernel_pmap) {
2406 		pmap->pm_sref++;
2407 #ifdef DEBUG
2408 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
2409 			printf("enter: stab %x refcnt %d\n",
2410 			       pmap->pm_stab, pmap->pm_sref);
2411 #endif
2412 	}
2413 #if 0
2414 	/*
2415 	 * Flush stale TLB info.
2416 	 */
2417 	if (pmap == kernel_pmap)
2418 		TBIAS();
2419 	else
2420 		TBIAU();
2421 #endif
2422 	pmap->pm_ptpages++;
2423 	splx(s);
2424 }
2425 
2426 #ifdef DEBUG
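/*
 * Dump the PV entry chain for the physical page `pa'.
 */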
2427 /* static */
2428 void
2429 pmap_pvdump(pa)
2430 	vm_offset_t pa;
2431 {
2432 	register pv_entry_t pv;
2433 
2434 	printf("pa %x", pa);
2435 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
2436 		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
2437 		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
2438 		       pv->pv_flags);
2439 	printf("\n");
2440 }
2441 
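/*
 * Sanity check: count the valid PTEs in the PT page mapped at `va' and
 * compare against the wired count recorded in the pt_map entry for that
 * page, complaining if the two differ.
 */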
2442 /* static */
2443 void
2444 pmap_check_wiring(str, va)
2445 	char *str;
2446 	vm_offset_t va;
2447 {
2448 	vm_map_entry_t entry;
2449 	register int count, *pte;
2450 
2451 	va = trunc_page(va);
2452 	if (!pmap_ste_v(kernel_pmap, va) ||
2453 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
2454 		return;
2455 
2456 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
2457 		printf("wired_check: entry for %x not found\n", va);
2458 		return;
2459 	}
2460 	count = 0;
2461 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
2462 		if (*pte)
2463 			count++;
2464 	if (entry->wired_count != count)
2465 		printf("*%s*: %x: w%d/a%d\n",
2466 		       str, va, entry->wired_count, count);
2467 }
2468 #endif
2469