xref: /netbsd/sys/arch/hppa/hppa/pmap.c (revision 6550d01e)
1 /*	$NetBSD: pmap.c,v 1.80 2011/01/14 02:06:26 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*	$OpenBSD: pmap.c,v 1.132 2008/04/18 06:42:21 djm Exp $	*/
33 
34 /*
35  * Copyright (c) 1998-2004 Michael Shalayeff
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
51  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
52  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
53  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
56  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
57  * THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 /*
60  * References:
61  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
62  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
63  * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
64  *    Hewlett-Packard, February 1994, Third Edition
65  */
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.80 2011/01/14 02:06:26 rmind Exp $");
69 
70 #include "opt_cputype.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
75 #include <sys/proc.h>
76 #include <sys/mutex.h>
77 
78 #include <uvm/uvm.h>
79 
80 #include <machine/reg.h>
81 #include <machine/psl.h>
82 #include <machine/cpu.h>
83 #include <machine/pcb.h>
84 #include <machine/pmap.h>
85 #include <machine/pte.h>
86 #include <machine/cpufunc.h>
87 #include <machine/iomod.h>
88 
89 #include <hppa/hppa/hpt.h>
90 #include <hppa/hppa/machdep.h>
91 
92 #if defined(DDB)
93 #include <ddb/db_output.h>
94 #endif
95 
96 #ifdef PMAPDEBUG
97 
98 #define	static	/**/
99 #define	inline	/**/
100 
101 #define	DPRINTF(l,s)	do {		\
102 	if ((pmapdebug & (l)) == (l))	\
103 		printf s;		\
104 } while(0)
105 
106 #define	PDB_FOLLOW	0x00000001
107 #define	PDB_INIT	0x00000002
108 #define	PDB_ENTER	0x00000004
109 #define	PDB_REMOVE	0x00000008
110 #define	PDB_CREATE	0x00000010
111 #define	PDB_PTPAGE	0x00000020
112 #define	PDB_CACHE	0x00000040
113 #define	PDB_BITS	0x00000080
114 #define	PDB_COLLECT	0x00000100
115 #define	PDB_PROTECT	0x00000200
116 #define	PDB_EXTRACT	0x00000400
117 #define	PDB_VP		0x00000800
118 #define	PDB_PV		0x00001000
119 #define	PDB_PARANOIA	0x00002000
120 #define	PDB_WIRING	0x00004000
121 #define	PDB_PMAP	0x00008000
122 #define	PDB_STEAL	0x00010000
123 #define	PDB_PHYS	0x00020000
124 #define	PDB_POOL	0x00040000
125 #define	PDB_ALIAS	0x00080000
126 int pmapdebug = 0
127 	| PDB_INIT
128 	| PDB_FOLLOW
129 	| PDB_VP
130 	| PDB_PV
131 	| PDB_ENTER
132 	| PDB_REMOVE
133 	| PDB_STEAL
134 	| PDB_PROTECT
135 	| PDB_PHYS
136 	| PDB_ALIAS
137 	;
138 #else
139 #define	DPRINTF(l,s)	/* */
140 #endif
141 
142 int		pmap_hptsize = 16 * PAGE_SIZE;	/* patchable */
143 vaddr_t		pmap_hpt;
144 
145 static struct pmap	kernel_pmap_store;
146 struct pmap		*const kernel_pmap_ptr = &kernel_pmap_store;
147 
148 int		hppa_sid_max = HPPA_SID_MAX;
149 struct pool	pmap_pool;
150 struct pool	pmap_pv_pool;
151 int		pmap_pvlowat = 252;
152 bool		pmap_initialized = false;
153 
154 static kmutex_t	pmaps_lock;
155 
156 u_int	hppa_prot[8];
157 u_int	sid_counter;
158 
159 /*
160  * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
161  * Reference Manual" (HP part number 09740-90039) defines equivalent
162  * and non-equivalent virtual addresses in the cache.
163  *
164  * This macro evaluates to true iff the two space/virtual address
165  * combinations are non-equivalent aliases, and therefore will find
166  * two different locations in the cache.
167  *
168  * NB: currently, the CPU-specific desidhash() functions disable the
169  * use of the space in all cache hashing functions.  This means that
170  * this macro definition is stricter than it has to be (because it
171  * takes space into account), but one day cache space hashing should
172  * be re-enabled.  Cache space hashing should yield better performance
173  * through better utilization of the cache, assuming that most aliasing
174  * is the read-only kind, which we do allow in the cache.
175  */
176 #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
177   (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
178    ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
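/*
 * Editor's example (a sketch, not from the original sources; the 1MB
 * alias boundary, i.e. HPPA_PGAMASK == 0xfff00000, is an assumption):
 * with sp1 == sp2, the page-aligned addresses 0x00401000 and
 * 0x00501000 differ only in bits covered by HPPA_PGAMASK, so the
 * macro is false and both mappings index the same cache lines
 * (equivalent aliases).  0x00401000 and 0x00402000 differ below the
 * boundary, so the macro is true and pmap_check_alias() below will
 * refuse to leave such a pair writable.
 */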
179 
180 /* Prototypes. */
181 struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t);
182 void pmap_pagefree(struct vm_page *);
183 
184 static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *);
185 static inline uint32_t *pmap_sdir_get(pa_space_t);
186 
187 static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t);
188 static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t);
189 static inline pt_entry_t *pmap_pde_alloc(pmap_t, vaddr_t, struct vm_page **);
190 static inline struct vm_page *pmap_pde_ptp(pmap_t, volatile pt_entry_t *);
191 static inline void pmap_pde_release(pmap_t, vaddr_t, struct vm_page *);
195 
196 void pmap_pte_flush(pmap_t, vaddr_t, pt_entry_t);
197 
198 static inline pt_entry_t pmap_pte_get(volatile pt_entry_t *, vaddr_t);
199 static inline void pmap_pte_set(volatile pt_entry_t *, vaddr_t, pt_entry_t);
200 
201 static inline pt_entry_t pmap_vp_find(pmap_t, vaddr_t);
202 
203 static inline struct pv_entry *pmap_pv_alloc(void);
204 static inline void pmap_pv_free(struct pv_entry *);
205 static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t,
206     vaddr_t , struct vm_page *, u_int);
207 static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t,
208     vaddr_t);
209 
210 static inline void pmap_flush_page(struct vm_page *, bool);
211 
212 void pmap_copy_page(paddr_t, paddr_t);
213 
214 static void pmap_page_physload(paddr_t, paddr_t);
215 
216 #ifdef USE_HPT
217 static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t);
218 static inline uint32_t pmap_vtag(pmap_t, vaddr_t);
219 
220 #ifdef DDB
221 void pmap_hptdump(void);
222 #endif
223 #endif
224 
225 #ifdef DDB
226 void pmap_dump_table(pa_space_t, vaddr_t);
227 void pmap_dump_pv(paddr_t);
228 #endif
229 
230 void pmap_page_remove_locked(struct vm_page *);
231 int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
232 
233 /* un-invert PVF_REF */
234 #define pmap_pvh_attrs(a) \
235 	(((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
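/*
 * Editor's note (a sketch; that PVF_REF occupies the bit position of
 * the inverted reference-trap bit in the PTE is an assumption drawn
 * from the comment above):  a PTE value with the MOD bit set and the
 * inverted REF bit clear yields (PVF_MOD) ^ PVF_REF ==
 * (PVF_MOD|PVF_REF), i.e. "modified and referenced" in the positive
 * sense kept in pvh_attrs.
 */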
236 
237 #define PMAP_LOCK(pm)					\
238 	do {						\
239 		if ((pm) != pmap_kernel())		\
240 			mutex_enter(&(pm)->pm_lock);	\
241 	} while (/*CONSTCOND*/0)
242 
243 #define PMAP_UNLOCK(pm)					\
244 	do {						\
245 		if ((pm) != pmap_kernel())		\
246 			mutex_exit(&(pm)->pm_lock);	\
247 	} while (/*CONSTCOND*/0)
248 
249 struct vm_page *
250 pmap_pagealloc(struct uvm_object *obj, voff_t off)
251 {
252 	struct vm_page *pg;
253 
254 	if ((pg = uvm_pagealloc(obj, off, NULL,
255 	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
256 		printf("pmap_pagealloc fail\n");
257 
258 	return (pg);
259 }
260 
261 void
262 pmap_pagefree(struct vm_page *pg)
263 {
264 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
265 	pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
266 
267 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
268     defined(HP8500_CPU) || defined(HP8600_CPU)
269 	pdtlb(HPPA_SID_KERNEL, pa);
270 	pitlb(HPPA_SID_KERNEL, pa);
271 #endif
272 	uvm_pagefree(pg);
273 }
274 
275 #ifdef USE_HPT
276 /*
277  * This hash function is the one used by the hardware TLB walker on the 7100LC.
278  */
279 static inline struct hpt_entry *
280 pmap_hash(pmap_t pmap, vaddr_t va)
281 {
282 
283 	return (struct hpt_entry *)(pmap_hpt +
284 	    (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1)));
285 }
286 
287 static inline uint32_t
288 pmap_vtag(pmap_t pmap, vaddr_t va)
289 {
290 
291 	return (0x80000000 | (pmap->pm_space & 0xffff) |
292 	    ((va >> 1) & 0x7fff0000));
293 }
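/*
 * Editor's worked example (a sketch assuming the default pmap_hptsize
 * of 16 * PAGE_SIZE == 64 KiB, i.e. a 0xffff mask, and 16-byte HPT
 * entries):  for space 0x5 and va 0x12345000,
 *
 *	(va >> 8)		== 0x00123450
 *	(space << 9)		== 0x00000a00
 *	(xor) & 0xffff		== 0x3e50	(byte offset into the table)
 *
 * Since va is page-aligned, the low four bits of the offset are
 * always zero, so the offset stays entry-aligned.  The matching tag
 * is 0x80000000 | 0x0005 | ((va >> 1) & 0x7fff0000) == 0x891a0005.
 */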
294 #endif
295 
296 static inline void
297 pmap_sdir_set(pa_space_t space, volatile uint32_t *pd)
298 {
299 	volatile uint32_t *vtop;
300 
301 	mfctl(CR_VTOP, vtop);
302 
303 	KASSERT(vtop != NULL);
304 
305 	vtop[space] = (uint32_t)pd;
306 }
307 
308 static inline uint32_t *
309 pmap_sdir_get(pa_space_t space)
310 {
311 	uint32_t *vtop;
312 
313 	mfctl(CR_VTOP, vtop);
314 	return ((uint32_t *)vtop[space]);
315 }
316 
317 static inline volatile pt_entry_t *
318 pmap_pde_get(volatile uint32_t *pd, vaddr_t va)
319 {
320 
321 	return ((pt_entry_t *)pd[va >> 22]);
322 }
323 
324 static inline void
325 pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp)
326 {
327 
328 	DPRINTF(PDB_FOLLOW|PDB_VP,
329 	    ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp));
330 
331 	KASSERT((ptp & PGOFSET) == 0);
332 
333 	pm->pm_pdir[va >> 22] = ptp;
334 }
335 
336 static inline pt_entry_t *
337 pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep)
338 {
339 	struct vm_page *pg;
340 	paddr_t pa;
341 
342 	DPRINTF(PDB_FOLLOW|PDB_VP,
343 	    ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep));
344 
345 	KASSERT(pm != pmap_kernel());
346 	KASSERT(mutex_owned(&pm->pm_lock));
347 
348 	pg = pmap_pagealloc(&pm->pm_obj, va);
349 
350 	if (pg == NULL)
351 		return NULL;
352 
353 	pa = VM_PAGE_TO_PHYS(pg);
354 
355 	DPRINTF(PDB_FOLLOW|PDB_VP, ("%s: pde %lx\n", __func__, pa));
356 
357 	pg->flags &= ~PG_BUSY;		/* never busy */
358 	pg->wire_count = 1;		/* no mappings yet */
359 	pmap_pde_set(pm, va, pa);
360 	pm->pm_stats.resident_count++;	/* count PTP as resident */
361 	pm->pm_ptphint = pg;
362 	if (pdep)
363 		*pdep = pg;
364 	return ((pt_entry_t *)pa);
365 }
366 
367 static inline struct vm_page *
368 pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde)
369 {
370 	paddr_t pa = (paddr_t)pde;
371 
372 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p)\n", __func__, pm, pde));
373 
374 	if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
375 		return (pm->pm_ptphint);
376 
377 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: lookup 0x%lx\n", __func__, pa));
378 
379 	return (PHYS_TO_VM_PAGE(pa));
380 }
381 
382 static inline void
383 pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp)
384 {
385 
386 	DPRINTF(PDB_FOLLOW|PDB_PV,
387 	    ("%s(%p, 0x%lx, %p)\n", __func__, pmap, va, ptp));
388 
389 	KASSERT(pmap != pmap_kernel());
390 	if (--ptp->wire_count <= 1) {
391 		DPRINTF(PDB_FOLLOW|PDB_PV,
392 		    ("%s: disposing ptp %p\n", __func__, ptp));
393 		pmap_pde_set(pmap, va, 0);
394 		pmap->pm_stats.resident_count--;
395 		if (pmap->pm_ptphint == ptp)
396 			pmap->pm_ptphint = TAILQ_FIRST(&pmap->pm_obj.memq);
397 		ptp->wire_count = 0;
398 
399 		KASSERT((ptp->flags & PG_BUSY) == 0);
400 
401 		pmap_pagefree(ptp);
402 	}
403 }
404 
405 static inline pt_entry_t
406 pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
407 {
408 
409 	return (pde[(va >> 12) & 0x3ff]);
410 }
411 
412 static inline void
413 pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
414 {
415 
416 	DPRINTF(PDB_FOLLOW|PDB_VP, ("%s(%p, 0x%lx, 0x%x)\n",
417 	    __func__, pde, va, pte));
418 
419 	KASSERT(pde != NULL);
420 	KASSERT(((paddr_t)pde & PGOFSET) == 0);
421 
422 	pde[(va >> 12) & 0x3ff] = pte;
423 }
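/*
 * Editor's note (illustrative only, derived from the shifts above and
 * in pmap_pde_get/pmap_pde_set):  a 32-bit virtual address splits as
 * va[31:22] -> page directory slot, va[21:12] -> PTE slot within the
 * 4 KiB PTP, va[11:0] -> byte offset in the page.  For example, va
 * 0x12345678 uses pdir index 0x48, pte index 0x345 and offset 0x678.
 */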
424 
425 void
426 pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
427 {
428 
429 	fdcache(pmap->pm_space, va, PAGE_SIZE);
430 	if (pte & PTE_PROT(TLB_EXECUTE)) {
431 		ficache(pmap->pm_space, va, PAGE_SIZE);
432 		pitlb(pmap->pm_space, va);
433 	}
434 	pdtlb(pmap->pm_space, va);
435 #ifdef USE_HPT
436 	if (pmap_hpt) {
437 		struct hpt_entry *hpt;
438 		hpt = pmap_hash(pmap, va);
439 		if (hpt->hpt_valid &&
440 		    hpt->hpt_space == pmap->pm_space &&
441 		    hpt->hpt_vpn == ((va >> 1) & 0x7fff0000))
442 			hpt->hpt_space = 0xffff;
443 	}
444 #endif
445 }
446 
447 static inline pt_entry_t
448 pmap_vp_find(pmap_t pm, vaddr_t va)
449 {
450 	volatile pt_entry_t *pde;
451 
452 	if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
453 		return (0);
454 
455 	return (pmap_pte_get(pde, va));
456 }
457 
458 #ifdef DDB
459 void
460 pmap_dump_table(pa_space_t space, vaddr_t sva)
461 {
462 	char buf[64];
463 	volatile pt_entry_t *pde = NULL;
464 	vaddr_t va = sva;
465 	vaddr_t pdemask = 1;
466 	pt_entry_t pte;
467 	uint32_t *pd;
468 
469 	if (space > hppa_sid_max)
470 		return;
471 
472 	pd = pmap_sdir_get(space);
473 	if (!pd)
474 		return;
475 
476 	do {
477 		if (pdemask != (va & PDE_MASK)) {
478 			pdemask = va & PDE_MASK;
479 			pde = pmap_pde_get(pd, va);
480 			if (!pde) {
481 				va = pdemask + PDE_SIZE;
482 				continue;
483 			}
484 			db_printf("%x:%8p:\n", space, pde);
485 		}
486 
487 		pte = pmap_pte_get(pde, va);
488 		if (pte) {
489 			snprintb(buf, sizeof(buf), TLB_BITS,
490 			   TLB_PROT(pte & PAGE_MASK));
491 			db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
492 			    buf);
493 		}
494 		va += PAGE_SIZE;
495 	} while (va != 0);
496 }
497 
498 void
499 pmap_dump_pv(paddr_t pa)
500 {
501 	struct vm_page *pg;
502 	struct vm_page_md *md;
503 	struct pv_entry *pve;
504 
505 	pg = PHYS_TO_VM_PAGE(pa);
506 	md = VM_PAGE_TO_MD(pg);
507 	mutex_enter(&md->pvh_lock);
508 	db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
509 	    md->pvh_aliases);
510 	for (pve = md->pvh_list; pve; pve = pve->pv_next)
511 		db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
512 		    pve->pv_va & PV_VAMASK);
513 	mutex_exit(&md->pvh_lock);
514 }
515 #endif
516 
517 int
518 pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
519 {
520 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
521 	struct pv_entry *pve;
522 	int ret = 0;
523 
524 	/* check for non-equ aliased mappings */
525 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
526 		vaddr_t pva = pve->pv_va & PV_VAMASK;
527 
528 		pte |= pmap_vp_find(pve->pv_pmap, pva);
529 		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
530 		    (pte & PTE_PROT(TLB_WRITE))) {
531 
532 			DPRINTF(PDB_FOLLOW|PDB_ALIAS,
533                             ("%s: aliased writable mapping 0x%x:0x%lx\n",
534 			    ("%s: aliased writable mapping 0x%x:0x%lx\n",
535 			    __func__, pve->pv_pmap->pm_space, pve->pv_va));
536 		}
537 	}
538 
539 	return (ret);
540 }
541 
542 /*
543  * This allocates and returns a new struct pv_entry.
544  */
545 static inline struct pv_entry *
546 pmap_pv_alloc(void)
547 {
548 	struct pv_entry *pv;
549 
550 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s()\n", __func__));
551 
552 	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
553 
554 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: %p\n", __func__, pv));
555 
556 	return (pv);
557 }
558 
559 static inline void
560 pmap_pv_free(struct pv_entry *pv)
561 {
562 
563 	if (pv->pv_ptp)
564 		pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK,
565 		    pv->pv_ptp);
566 
567 	pool_put(&pmap_pv_pool, pv);
568 }
569 
570 static inline void
571 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
572     vaddr_t va, struct vm_page *pdep, u_int flags)
573 {
574 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
575 
576 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
577 	    __func__, pg, pve, pm, va, pdep, flags));
578 
579 	KASSERT(mutex_owned(&md->pvh_lock));
580 
581 	pve->pv_pmap = pm;
582 	pve->pv_va = va | flags;
583 	pve->pv_ptp = pdep;
584 	pve->pv_next = md->pvh_list;
585 	md->pvh_list = pve;
586 }
587 
588 static inline struct pv_entry *
589 pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
590 {
591 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
592 	struct pv_entry **pve, *pv;
593 
594 	KASSERT(mutex_owned(&md->pvh_lock));
595 
596 	for (pv = *(pve = &md->pvh_list);
597 	    pv; pv = *(pve = &(*pve)->pv_next))
598 		if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
599 			*pve = pv->pv_next;
600 			break;
601 		}
602 	return (pv);
603 }
604 
605 #define	FIRST_16M atop(16 * 1024 * 1024)
606 
607 static void
608 pmap_page_physload(paddr_t spa, paddr_t epa)
609 {
610 
611 	if (spa < FIRST_16M && epa <= FIRST_16M) {
612 		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
613 		    __func__, spa, epa));
614 
615 		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA);
616 	} else if (spa < FIRST_16M && epa > FIRST_16M) {
617 		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
618 		    __func__, spa, FIRST_16M));
619 
620 		uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M,
621 		    VM_FREELIST_ISADMA);
622 
623 		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
624 		    __func__, FIRST_16M, epa));
625 
626 		uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa,
627 		    VM_FREELIST_DEFAULT);
628 	} else {
629 		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
630 		    __func__, spa, epa));
631 
632 		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT);
633 	}
634 
635 	availphysmem += epa - spa;
636 }
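/*
 * Editor's example (a sketch):  a segment spanning the 16MB boundary,
 * say spa == atop(8MB) and epa == atop(64MB), is loaded as two UVM
 * segments:  [8MB, 16MB) on VM_FREELIST_ISADMA (usable for ISA DMA)
 * and [16MB, 64MB) on VM_FREELIST_DEFAULT.  availphysmem grows by the
 * full epa - spa either way.
 */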
637 
638 /*
639  * Bootstrap the system enough to run with virtual memory.
640  * Map the kernel's code, data and bss, and allocate the system page table.
641  * Called with mapping OFF.
642  *
643  * Parameters:
644  * vstart	PA of first available physical page
645  */
646 void
647 pmap_bootstrap(vaddr_t vstart)
648 {
649 	vaddr_t va, addr;
650 	vsize_t size;
651 	extern paddr_t hppa_vtop;
652 	pmap_t kpm;
653 	int npdes, nkpdes;
654 	extern int resvphysmem;
655 	vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
656 	paddr_t ksrx, kerx, ksro, kero, ksrw, kerw;
657 	extern int usebtlb;
658 
659 	/* Provided by the linker script */
660 	extern int kernel_text, etext;
661 	extern int __rodata_start, __rodata_end;
662 	extern int __data_start;
663 
664 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(0x%lx)\n", __func__, vstart));
665 
666 	uvm_setpagesize();
667 
668 	hppa_prot[UVM_PROT_NONE]  = TLB_AR_NA;
669 	hppa_prot[UVM_PROT_READ]  = TLB_AR_R;
670 	hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
671 	hppa_prot[UVM_PROT_RW]    = TLB_AR_RW;
672 	hppa_prot[UVM_PROT_EXEC]  = TLB_AR_RX;
673 	hppa_prot[UVM_PROT_RX]    = TLB_AR_RX;
674 	hppa_prot[UVM_PROT_WX]    = TLB_AR_RWX;
675 	hppa_prot[UVM_PROT_RWX]   = TLB_AR_RWX;
676 
677 	/*
678 	 * Initialize kernel pmap
679 	 */
680 	addr = round_page(vstart);
681 	kpm = pmap_kernel();
682 	memset(kpm, 0, sizeof(*kpm));
683 
684 	UVM_OBJ_INIT(&kpm->pm_obj, NULL, 1);
685 	kpm->pm_space = HPPA_SID_KERNEL;
686 	kpm->pm_pid = HPPA_PID_KERNEL;
687 	kpm->pm_pdir_pg = NULL;
688 	kpm->pm_pdir = (uint32_t *)addr;
689 
690 	memset((void *)addr, 0, PAGE_SIZE);
691 	fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
692 	addr += PAGE_SIZE;
693 
694 	/*
695 	 * Allocate various tables and structures.
696 	 */
697 	mtctl(addr, CR_VTOP);
698 	hppa_vtop = addr;
699 	size = round_page((hppa_sid_max + 1) * 4);
700 	memset((void *)addr, 0, size);
701 	fdcache(HPPA_SID_KERNEL, addr, size);
702 	DPRINTF(PDB_INIT, ("%s: vtop 0x%lx @ 0x%lx\n", __func__, size,
703 	    addr));
704 
705 	addr += size;
706 	pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
707 
708 	/*
709 	 * cpuid() found out how big the HPT should be, so align addr to
710 	 * what will be its beginning.  We don't waste the pages skipped
711 	 * for the alignment.
712 	 */
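	/*
	 * Editor's worked example of the rounding below (a sketch that
	 * assumes the default pmap_hptsize of 16 * PAGE_SIZE == 64 KiB):
	 * with addr == 0x34000 the low bits 0x4000 are non-zero, so addr
	 * is first bumped to 0x44000 and then masked down to 0x40000,
	 * the next 64 KiB boundary at or above the original address.
	 */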
713 #ifdef USE_HPT
714 	if (pmap_hptsize) {
715 		struct hpt_entry *hptp;
716 		int i, error;
717 
718 		if (addr & (pmap_hptsize - 1))
719 			addr += pmap_hptsize;
720 		addr &= ~(pmap_hptsize - 1);
721 
722 		memset((void *)addr, 0, pmap_hptsize);
723 		hptp = (struct hpt_entry *)addr;
724 		for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) {
725 			hptp[i].hpt_valid = 0;
726 			hptp[i].hpt_space = 0xffff;
727 			hptp[i].hpt_vpn = 0;
728 		}
729 		pmap_hpt = addr;
730 		addr += pmap_hptsize;
731 
732 		DPRINTF(PDB_INIT, ("%s: hpt_table 0x%x @ 0x%lx\n", __func__,
733 		    pmap_hptsize, addr));
734 
735 		if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
736 			printf("WARNING: HPT init error %d -- DISABLED\n",
737 			    error);
738 			pmap_hpt = 0;
739 		} else
740 			DPRINTF(PDB_INIT,
741 			    ("%s: HPT installed for %ld entries @ 0x%lx\n",
742 			    __func__, pmap_hptsize / sizeof(struct hpt_entry),
743 			    addr));
744 	}
745 #endif
746 
747 	/* Setup vtop in lwp0 trapframe. */
748 	lwp0.l_md.md_regs->tf_vtop = hppa_vtop;
749 
750 	/* Pre-allocate PDEs for kernel virtual */
751 	nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE;
752 	/* ... and io space too */
753 	nkpdes += HPPA_IOLEN / PDE_SIZE;
754 	/* ... and all physmem (VA == PA) */
755 	npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE);
756 
757 	DPRINTF(PDB_INIT, ("%s: npdes %d\n", __func__, npdes));
758 
759 	/* map the pdes */
760 	for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {
761 		/* last nkpdes are for the kernel virtual */
762 		if (npdes == nkpdes - 1)
763 			va = SYSCALLGATE;
764 		if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
765 			va = HPPA_IOBEGIN;
766 		/* now map the pde for the physmem */
767 		memset((void *)addr, 0, PAGE_SIZE);
768 		DPRINTF(PDB_INIT|PDB_VP,
769 		    ("%s: pde premap 0x%08lx 0x%08lx\n", __func__, va,
770 		    addr));
771 		pmap_pde_set(kpm, va, addr);
772 		kpm->pm_stats.resident_count++; /* count PTP as resident */
773 	}
774 
775 	/*
776 	 * At this point we've finished reserving memory for the kernel.
777 	 */
778 	/* XXXNH */
779 	resvphysmem = atop(addr);
780 
781 	ksrx = (paddr_t) &kernel_text;
782 	kerx = (paddr_t) &etext;
783 	ksro = (paddr_t) &__rodata_start;
784 	kero = (paddr_t) &__rodata_end;
785 	ksrw = (paddr_t) &__data_start;
786 	kerw = addr;
787 
788 	/*
789 	 * The kernel text, data, and bss must be direct-mapped,
790 	 * because the kernel often runs in physical mode, and
791 	 * anyways the loader loaded the kernel into physical
792 	 * memory exactly where it was linked.
793 	 *
794 	 * All memory already allocated after bss, either by
795 	 * our caller or by this function itself, must also be
796 	 * direct-mapped, because it's completely unmanaged
797 	 * and was allocated in physical mode.
798 	 *
799 	 * BTLB entries are used to do this direct mapping.
800 	 * BTLB entries have a minimum and maximum possible size,
801 	 * and MD code gives us these sizes in units of pages.
802 	 */
803 
804 	btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
805 	btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
806 
807 	/*
808 	 * To try to conserve BTLB entries, take a hint from how
809 	 * the kernel was linked: take the kernel text start as
810 	 * our effective minimum BTLB entry size, assuming that
811 	 * the data segment was also aligned to that size.
812 	 *
813 	 * In practice, linking the kernel at 2MB, and aligning
814 	 * the data segment to a 2MB boundary, should control well
815 	 * how much of the BTLB the pmap uses.  However, this code
816 	 * should not rely on this 2MB magic number, nor should
817 	 * it rely on the data segment being aligned at all.  This
818 	 * is to allow (smaller) kernels (linked lower) to work fine.
819 	 */
820 	btlb_entry_min = (vaddr_t) &kernel_text;
821 
822 	if (usebtlb) {
823 #define BTLB_SET_SIZE 16
824 		vaddr_t btlb_entry_start[BTLB_SET_SIZE];
825 		vsize_t btlb_entry_size[BTLB_SET_SIZE];
826 		int btlb_entry_vm_prot[BTLB_SET_SIZE];
827 		int btlb_i;
828 		int btlb_j;
829 
830 		/*
831 		 * Now make BTLB entries to direct-map the kernel text
832 		 * read- and execute-only as much as possible.  Note that
833 		 * if the data segment isn't nicely aligned, the last
834 		 * BTLB entry for the kernel text may also cover some of
835 		 * the data segment, meaning it will have to allow writing.
836 		 */
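		/*
		 * Editor's example of the loop below (a sketch assuming the
		 * kernel is linked at 2MB, so btlb_entry_min == 2MB, and
		 * that btlb_entry_max is at least 4MB):  text entries are
		 * laid down 2MB at a time.  Once the entry at 0x600000 has
		 * been created, the coalescing step sees that the previous
		 * entry at 0x400000 has the same protection and size and is
		 * aligned to the doubled size, so the two merge into one
		 * 4MB entry at 0x400000.  The first entry at 0x200000 never
		 * merges because 0x200000 is not 4MB aligned.
		 */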
837 		addr = ksrx;
838 
839 		DPRINTF(PDB_INIT,
840 		    ("%s: BTLB mapping text and rodata @ %p - %p\n", __func__,
841 		    (void *)addr, (void *)kero));
842 
843 		btlb_j = 0;
844 		while (addr < (vaddr_t) kero) {
845 
846 			/* Set up the next BTLB entry. */
847 			KASSERT(btlb_j < BTLB_SET_SIZE);
848 			btlb_entry_start[btlb_j] = addr;
849 			btlb_entry_size[btlb_j] = btlb_entry_min;
850 			btlb_entry_vm_prot[btlb_j] =
851 			    VM_PROT_READ | VM_PROT_EXECUTE;
852 			if (addr + btlb_entry_min > kero)
853 				btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
854 
855 			/* Coalesce BTLB entries whenever possible. */
856 			while (btlb_j > 0 &&
857 			    btlb_entry_vm_prot[btlb_j] ==
858 				btlb_entry_vm_prot[btlb_j - 1] &&
859 			    btlb_entry_size[btlb_j] ==
860 				btlb_entry_size[btlb_j - 1] &&
861 			    !(btlb_entry_start[btlb_j - 1] &
862 				((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
863 			    (btlb_entry_size[btlb_j - 1] << 1) <=
864 				btlb_entry_max)
865 				btlb_entry_size[--btlb_j] <<= 1;
866 
867 			/* Move on. */
868 			addr =
869 			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
870 			btlb_j++;
871 		}
872 
873 		/*
874 		 * Now make BTLB entries to direct-map the kernel data,
875 		 * bss, and all of the preallocated space read-write.
876 		 *
877 		 * Note that, unlike above, we're not concerned with
878 		 * making these BTLB entries such that they finish as
879 		 * close as possible to the end of the space we need
880 		 * them to map.  Instead, to minimize the number of BTLB
881 		 * entries we need, we make them as large as possible.
882 		 * The only thing this wastes is kernel virtual space,
883 		 * which is plentiful.
884 		 */
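		/*
		 * Editor's example of the sizing loop below (a sketch
		 * assuming kerw and btlb_entry_max are large enough):  a
		 * read/write entry starting at 0x800000 with a 2MB minimum
		 * doubles to 4MB and then 8MB while 0x800000 stays aligned
		 * to the doubled size, and stops there because 0x800000 is
		 * not 16MB aligned.
		 */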
885 
886 		DPRINTF(PDB_INIT, ("%s: mapping data, bss, etc @ %p - %p\n",
887 		    __func__, (void *)addr, (void *)kerw));
888 
889 		while (addr < kerw) {
890 
891 			/* Make the next BTLB entry. */
892 			KASSERT(btlb_j < BTLB_SET_SIZE);
893 			size = btlb_entry_min;
894 			while ((addr + size) < kerw &&
895 				(size << 1) < btlb_entry_max &&
896 			    !(addr & ((size << 1) - 1)))
897 				size <<= 1;
898 			btlb_entry_start[btlb_j] = addr;
899 			btlb_entry_size[btlb_j] = size;
900 			btlb_entry_vm_prot[btlb_j] =
901 			    VM_PROT_READ | VM_PROT_WRITE;
902 
903 			/* Move on. */
904 			addr =
905 			    btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
906 			btlb_j++;
907 		}
908 
909 		/* Now insert all of the BTLB entries. */
910 		for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
911 			int error;
912 			int prot;
913 
914 			btlb_entry_got = btlb_entry_size[btlb_i];
915 			prot = btlb_entry_vm_prot[btlb_i];
916 
917 			error = hppa_btlb_insert(kpm->pm_space,
918 			    btlb_entry_start[btlb_i], btlb_entry_start[btlb_i],
919 			    &btlb_entry_got,
920 			    kpm->pm_pid | pmap_prot(kpm, prot));
921 
922 			if (error)
923 				panic("%s: cannot insert BTLB entry",
924 				    __func__);
925 			if (btlb_entry_got != btlb_entry_size[btlb_i])
926 				panic("%s: BTLB entry mapped wrong amount",
927 				    __func__);
928 		}
929 
930 		kerw =
931 		    btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
932 	}
933 
934 	/*
935 	 * We now know the exact beginning of managed kernel virtual space.
936 	 *
937 	 * Finally, load physical pages into UVM.  There are three segments of
938 	 * pages.
939 	 */
940 
941 	availphysmem = 0;
942 
943 	pmap_page_physload(resvmem, atop(ksrx));
944 	pmap_page_physload(atop(kero), atop(ksrw));
945 	pmap_page_physload(atop(kerw), physmem);
946 
947 	mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
948 
949 	/* TODO optimize/inline the kenter */
950 	for (va = PAGE_SIZE; va < ptoa(physmem); va += PAGE_SIZE) {
951 		vm_prot_t prot = UVM_PROT_RW;
952 
953 		if (va < resvmem)
954 			prot = UVM_PROT_RX;
955 		else if (va >= ksrx && va < kerx)
956 			prot = UVM_PROT_RX;
957 		else if (va >= ksro && va < kero)
958 			prot = UVM_PROT_R;
959 #ifdef DIAGNOSTIC
960 		else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE)
961 			prot = UVM_PROT_NONE;
962 #endif
963 		pmap_kenter_pa(va, va, prot, 0);
964 	}
965 
966 	/* XXXNH update */
967 	DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksro,
968 	    kero));
969 	DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksrw,
970 	    kerw));
971 
972 }
973 
974 /*
975  * Finishes the initialization of the pmap module.
976  * This procedure is called from uvm_init() in uvm/uvm_init.c
977  * to initialize any remaining data structures that the pmap module
978  * needs to map virtual memory (VM is already ON).
979  */
980 void
981 pmap_init(void)
982 {
983 	extern void gateway_page(void);
984 	volatile pt_entry_t *pde;
985 
986 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s()\n", __func__));
987 
988 	sid_counter = HPPA_SID_KERNEL;
989 
990 	pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
991 	    &pool_allocator_nointr, IPL_NONE);
992 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
993 	    &pool_allocator_nointr, IPL_NONE);
994 
995 	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
996 	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);
997 
998 	/*
999 	 * map SysCall gateway page once for everybody
1000 	 * NB: we'll have to remap the phys memory
1001 	 *     if we have any at SYSCALLGATE address (;
1002 	 *
1003 	 * no spls since no interrupts
1004 	 */
1005 	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
1006 	    !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
1007 		panic("pmap_init: cannot allocate pde");
1008 
1009 	pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
1010 	    PTE_PROT(TLB_GATE_PROT));
1011 
1012 	pmap_initialized = true;
1013 
1014 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(): done\n", __func__));
1015 }
1016 
1017 /*
1018  * How much virtual space does this kernel have?
1019  */
1020 void
1021 pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
1022 {
1023 
1024 	*startp = SYSCALLGATE + PAGE_SIZE;
1025 	*endp = VM_MAX_KERNEL_ADDRESS;
1026 }
1027 
1028 /*
1029  * pmap_create()
1030  *
1031  * Create and return a physical map.
1032  * The map is an actual physical map, and may be referenced by the hardware.
1033  */
1034 pmap_t
1035 pmap_create(void)
1036 {
1037 	pmap_t pmap;
1038 	pa_space_t space;
1039 
1040 	pmap = pool_get(&pmap_pool, PR_WAITOK);
1041 
1042 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pmap = %p\n", __func__, pmap));
1043 
1044 	UVM_OBJ_INIT(&pmap->pm_obj, NULL, 1);
1045 
1046 	mutex_enter(&pmaps_lock);
1047 
1048 	/*
1049 	 * Allocate space IDs for the pmap; we get the protection ID from this.
1050 	 * If all are allocated, there is nothing we can do.
1051 	 */
1052 	/* XXXNH can't this loop forever??? */
1053 	for (space = sid_counter; pmap_sdir_get(space);
1054 	    space = (space + 1) % hppa_sid_max)
1055 		;
1056 
1057 	if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
1058 		panic("pmap_create: no pages");
1059 	pmap->pm_ptphint = NULL;
1060 	pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
1061 	pmap_sdir_set(space, pmap->pm_pdir);
1062 
1063 	pmap->pm_space = space;
1064 	pmap->pm_pid = (space + 1) << 1;
1065 
1066 	pmap->pm_stats.resident_count = 1;
1067 	pmap->pm_stats.wired_count = 0;
1068 
1069 	mutex_exit(&pmaps_lock);
1070 
1071 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pm = %p, space = %d, pid = %d\n",
1072 	    __func__, pmap, space, pmap->pm_pid));
1073 
1074 	return (pmap);
1075 }
1076 
1077 /*
1078  * pmap_destroy(pmap)
1079  *	Gives up a reference to the specified pmap.  When the reference count
1080  *	reaches zero the pmap structure is added to the pmap free list.
1081  *	Should only be called if the map contains no valid mappings.
1082  */
1083 void
1084 pmap_destroy(pmap_t pmap)
1085 {
1086 #ifdef DIAGNOSTIC
1087 	struct vm_page *pg;
1088 #endif
1089 	int refs;
1090 
1091 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
1092 
1093 	mutex_enter(&pmap->pm_lock);
1094 	refs = --pmap->pm_obj.uo_refs;
1095 	mutex_exit(&pmap->pm_lock);
1096 
1097 	if (refs > 0)
1098 		return;
1099 
1100 #ifdef DIAGNOSTIC
1101 	while ((pg = TAILQ_FIRST(&pmap->pm_obj.memq))) {
1102 		pt_entry_t *pde, *epde;
1103 		struct vm_page *sheep;
1104 		struct pv_entry *haggis;
1105 
1106 		if (pg == pmap->pm_pdir_pg)
1107 			continue;
1108 
1109 		DPRINTF(PDB_FOLLOW, ("%s(%p): stray ptp "
1110 		    "0x%lx w/ %d ents:", __func__, pmap, VM_PAGE_TO_PHYS(pg),
1111 		    pg->wire_count - 1));
1112 
1113 		pde = (pt_entry_t *)VM_PAGE_TO_PHYS(pg);
1114 		epde = (pt_entry_t *)(VM_PAGE_TO_PHYS(pg) + PAGE_SIZE);
1115 		for (; pde < epde; pde++) {
1116 			if (*pde == 0)
1117 				continue;
1118 
1119 			sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
1120 			struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
1121 			for (haggis = md->pvh_list; haggis != NULL; )
1122 				if (haggis->pv_pmap == pmap) {
1123 
1124 					DPRINTF(PDB_FOLLOW, (" 0x%lx",
1125 					    haggis->pv_va));
1126 
1127 					pmap_remove(pmap,
1128 					    haggis->pv_va & PV_VAMASK,
1129 					    haggis->pv_va + PAGE_SIZE);
1130 
1131 					/*
1132 					 * exploit the sacred knowledge of
1133 					 * lambeous ozzmosis
1134 					 */
1135 					haggis = md->pvh_list;
1136 				} else
1137 					haggis = haggis->pv_next;
1138 		}
1139 		DPRINTF(PDB_FOLLOW, ("\n"));
1140 	}
1141 #endif
1142 	pmap_sdir_set(pmap->pm_space, 0);
1143 	mutex_enter(&pmap->pm_lock);
1144 	pmap_pagefree(pmap->pm_pdir_pg);
1145 	mutex_exit(&pmap->pm_lock);
1146 	mutex_destroy(&pmap->pm_lock);
1147 	pmap->pm_pdir_pg = NULL;
1148 	pool_put(&pmap_pool, pmap);
1149 }
1150 
1151 /*
1152  * Add a reference to the specified pmap.
1153  */
1154 void
1155 pmap_reference(pmap_t pmap)
1156 {
1157 
1158 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
1159 
1160 	mutex_enter(&pmap->pm_lock);
1161 	pmap->pm_obj.uo_refs++;
1162 	mutex_exit(&pmap->pm_lock);
1163 }
1164 
1165 /*
1166  * pmap_enter(pmap, va, pa, prot, flags)
1167  *	Create a translation for the virtual address (va) to the physical
1168  *	address (pa) in the pmap with the protection requested. If the
1169  *	translation is wired then we can not allow a page fault to occur
1170  *	for this mapping.
1171  */
1172 int
1173 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1174 {
1175 	volatile pt_entry_t *pde;
1176 	pt_entry_t pte;
1177 	struct vm_page *pg, *ptp = NULL;
1178 	struct pv_entry *pve;
1179 	bool wired = (flags & PMAP_WIRED) != 0;
1180 
1181 	DPRINTF(PDB_FOLLOW|PDB_ENTER,
1182 	    ("%s(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n", __func__, pmap, va, pa,
1183 	    prot, flags));
1184 
1185 	PMAP_LOCK(pmap);
1186 
1187 	if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
1188 	    !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
1189 		if (flags & PMAP_CANFAIL) {
1190 			PMAP_UNLOCK(pmap);
1191 			return (ENOMEM);
1192 		}
1193 
1194 		panic("pmap_enter: cannot allocate pde");
1195 	}
1196 
1197 	if (!ptp)
1198 		ptp = pmap_pde_ptp(pmap, pde);
1199 
1200 	if ((pte = pmap_pte_get(pde, va))) {
1201 
1202 		DPRINTF(PDB_ENTER,
1203 		    ("%s: remapping 0x%x -> 0x%lx\n", __func__, pte, pa));
1204 
1205 		pmap_pte_flush(pmap, va, pte);
1206 		if (wired && !(pte & PTE_PROT(TLB_WIRED)))
1207 			pmap->pm_stats.wired_count++;
1208 		else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
1209 			pmap->pm_stats.wired_count--;
1210 
1211 		if (PTE_PAGE(pte) == pa) {
1212 			DPRINTF(PDB_FOLLOW|PDB_ENTER,
1213 			    ("%s: same page\n", __func__));
1214 			goto enter;
1215 		}
1216 
1217 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1218 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1219 		mutex_enter(&md->pvh_lock);
1220 		pve = pmap_pv_remove(pg, pmap, va);
1221 		md->pvh_attrs |= pmap_pvh_attrs(pte);
1222 		mutex_exit(&md->pvh_lock);
1223 	} else {
1224 		DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
1225 		    __func__, va, pa));
1226 		pte = PTE_PROT(TLB_REFTRAP);
1227 		pve = NULL;
1228 		pmap->pm_stats.resident_count++;
1229 		if (wired)
1230 			pmap->pm_stats.wired_count++;
1231 		if (ptp)
1232 			ptp->wire_count++;
1233 	}
1234 
1235 	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
1236 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1237 
1238 		if (!pve && !(pve = pmap_pv_alloc())) {
1239 			if (flags & PMAP_CANFAIL) {
1240 				mutex_exit(&md->pvh_lock);
1241 				PMAP_UNLOCK(pmap);
1242 				return (ENOMEM);
1243 			}
1244 			panic("%s: no pv entries available", __func__);
1245 		}
1246 		pte |= PTE_PROT(pmap_prot(pmap, prot));
1247 		mutex_enter(&md->pvh_lock);
1248 		if (pmap_check_alias(pg, va, pte))
1249 			pmap_page_remove_locked(pg);
1250 		pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
1251 		mutex_exit(&md->pvh_lock);
1252 	} else if (pve) {
1253 		pmap_pv_free(pve);
1254 	}
1255 
1256 enter:
1257 	/* preserve old ref & mod */
1258 	pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
1259 	    (pte & PTE_PROT(TLB_UNCACHEABLE|TLB_DIRTY|TLB_REFTRAP));
1260 	if (wired)
1261 		pte |= PTE_PROT(TLB_WIRED);
1262 	pmap_pte_set(pde, va, pte);
1263 
1264 	PMAP_UNLOCK(pmap);
1265 
1266 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__));
1267 
1268 	return (0);
1269 }
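/*
 * Editor's usage sketch (hypothetical caller fragment, not compiled):
 * a managed, pageable mapping created with PMAP_CANFAIL, followed by
 * the MI pmap_update() call.
 */
#if 0
	if (pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_CANFAIL) == ENOMEM) {
		/* the caller may wait for memory and retry */
	}
	pmap_update(pmap);
#endif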
1270 
1271 /*
1272  * pmap_remove(pmap, sva, eva)
1273  *	unmaps all virtual addresses in the virtual address
1274  *	range determined by [sva, eva) and pmap.
1275  *	sva and eva must be on machine independent page boundaries and
1276  *	sva must be less than or equal to eva.
1277  */
1278 void
1279 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1280 {
1281 	struct pv_entry *pve;
1282 	volatile pt_entry_t *pde = NULL;
1283 	pt_entry_t pte;
1284 	struct vm_page *pg;
1285 	vaddr_t pdemask;
1286 	int batch;
1287 
1288 	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1289 	    ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pmap, sva, eva));
1290 
1291 	PMAP_LOCK(pmap);
1292 
1293 	for (batch = 0; sva < eva; sva += PAGE_SIZE) {
1294 		pdemask = sva & PDE_MASK;
1295 		if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1296 			sva = pdemask + PDE_SIZE - PAGE_SIZE;
1297 			continue;
1298 		}
1299 		batch = pdemask == sva && sva + PDE_SIZE <= eva;
1300 
1301 		if ((pte = pmap_pte_get(pde, sva))) {
1302 
1303 			/* TODO measure here the speed tradeoff
1304 			 * for flushing whole 4M vs per-page
1305 			 * in case of non-complete pde fill
1306 			 */
1307 			pmap_pte_flush(pmap, sva, pte);
1308 			if (pte & PTE_PROT(TLB_WIRED))
1309 				pmap->pm_stats.wired_count--;
1310 			pmap->pm_stats.resident_count--;
1311 
1312 			/* iff properly accounted pde will be dropped anyway */
1313 			if (!batch)
1314 				pmap_pte_set(pde, sva, 0);
1315 
1316 			if (pmap_initialized &&
1317 			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1318 				struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1319 
1320 				mutex_enter(&md->pvh_lock);
1321 
1322 				pve = pmap_pv_remove(pg, pmap, sva);
1323 				md->pvh_attrs |= pmap_pvh_attrs(pte);
1324 
1325 				mutex_exit(&md->pvh_lock);
1326 
1327 				if (pve != NULL)
1328 					pmap_pv_free(pve);
1329 			}
1330 		}
1331 	}
1332 
1333 	PMAP_UNLOCK(pmap);
1334 
1335 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
1336 }
1337 
1338 
1339 void
1340 pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1341 {
1342 	struct vm_page *pg;
1343 	volatile pt_entry_t *pde = NULL;
1344 	pt_entry_t pte;
1345 	u_int pteprot, pdemask;
1346 
1347 	DPRINTF(PDB_FOLLOW|PDB_PMAP,
1348 	    ("%s(%p, %lx, %lx, %x)\n", __func__, pmap, sva, eva, prot));
1349 
1350 	sva = trunc_page(sva);
1351 	pteprot = PTE_PROT(pmap_prot(pmap, prot));
1352 
1353 	PMAP_LOCK(pmap);
1354 
1355 	for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
1356 		if (pdemask != (sva & PDE_MASK)) {
1357 			pdemask = sva & PDE_MASK;
1358 			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1359 				sva = pdemask + PDE_SIZE - PAGE_SIZE;
1360 				continue;
1361 			}
1362 		}
1363 		if ((pte = pmap_pte_get(pde, sva))) {
1364 
1365 			DPRINTF(PDB_PMAP,
1366 			    ("%s: va=0x%lx pte=0x%x\n", __func__, sva,  pte));
1367 			/*
1368 			 * Determine if mapping is changing.
1369 			 * If not, nothing to do.
1370 			 */
1371 			if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot)
1372 				continue;
1373 
1374 			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1375 			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1376 			mutex_enter(&md->pvh_lock);
1377 			md->pvh_attrs |= pmap_pvh_attrs(pte);
1378 			mutex_exit(&md->pvh_lock);
1379 
1380 			pmap_pte_flush(pmap, sva, pte);
1381 			pte &= ~PTE_PROT(TLB_AR_MASK);
1382 			pte |= pteprot;
1383 			pmap_pte_set(pde, sva, pte);
1384 		}
1385 	}
1386 
1387 	PMAP_UNLOCK(pmap);
1388 }
1389 
1390 void
1391 pmap_page_remove(struct vm_page *pg)
1392 {
1393 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1394 
1395 	mutex_enter(&md->pvh_lock);
1396 	pmap_page_remove_locked(pg);
1397 	mutex_exit(&md->pvh_lock);
1398 }
1399 
1400 void
1401 pmap_page_remove_locked(struct vm_page *pg)
1402 {
1403 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1404 	struct pv_entry *pve, *npve, **pvp;
1405 
1406 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
1407 
1408 	if (md->pvh_list == NULL)
1409 		return;
1410 
1411 	pvp = &md->pvh_list;
1412 	for (pve = md->pvh_list; pve; pve = npve) {
1413 		pmap_t pmap = pve->pv_pmap;
1414 		vaddr_t va = pve->pv_va & PV_VAMASK;
1415 		volatile pt_entry_t *pde;
1416 		pt_entry_t pte;
1417 
1418 		PMAP_LOCK(pmap);
1419 
1420 		pde = pmap_pde_get(pmap->pm_pdir, va);
1421 		pte = pmap_pte_get(pde, va);
1422 
1423 		npve = pve->pv_next;
1424 		/*
1425 		 * If this was an unmanaged mapping, it must be preserved. Move
1426 		 * it back on the list and advance the end-of-list pointer.
1427 		 */
1428 		if (pve->pv_va & PV_KENTER) {
1429 			*pvp = pve;
1430 			pvp = &pve->pv_next;
1431 		} else
1432 			md->pvh_attrs |= pmap_pvh_attrs(pte);
1433 
1434 		pmap_pte_flush(pmap, va, pte);
1435 		if (pte & PTE_PROT(TLB_WIRED))
1436 			pmap->pm_stats.wired_count--;
1437 		pmap->pm_stats.resident_count--;
1438 
1439 		if (!(pve->pv_va & PV_KENTER)) {
1440 			pmap_pte_set(pde, va, 0);
1441 			pmap_pv_free(pve);
1442 		}
1443 		PMAP_UNLOCK(pmap);
1444 	}
1445 	*pvp = NULL;
1446 
1447 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
1448 }
1449 
1450 /*
1451  *	Routine:	pmap_unwire
1452  *	Function:	Change the wiring attribute for a map/virtual-address
1453  *			pair.
1454  *	In/out conditions:
1455  *			The mapping must already exist in the pmap.
1456  *
1457  * Change the wiring for a given virtual page. This routine currently is
1458  * only used to unwire pages and hence the mapping entry will exist.
1459  */
1460 void
1461 pmap_unwire(pmap_t pmap, vaddr_t va)
1462 {
1463 	volatile pt_entry_t *pde;
1464 	pt_entry_t pte = 0;
1465 
1466 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p, 0x%lx)\n", __func__, pmap, va));
1467 
1468 	PMAP_LOCK(pmap);
1469 	if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1470 		pte = pmap_pte_get(pde, va);
1471 
1472 		KASSERT(pte);
1473 
1474 		if (pte & PTE_PROT(TLB_WIRED)) {
1475 			pte &= ~PTE_PROT(TLB_WIRED);
1476 			pmap->pm_stats.wired_count--;
1477 			pmap_pte_set(pde, va, pte);
1478 		}
1479 	}
1480 	PMAP_UNLOCK(pmap);
1481 
1482 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: leaving\n", __func__));
1483 }
1484 
1485 bool
1486 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
1487 {
1488 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1489 	struct pv_entry *pve;
1490 	int res;
1491 
1492 	DPRINTF(PDB_FOLLOW|PDB_BITS,
1493 	    ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
1494 
1495 	KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
1496 	KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
1497 
1498 	mutex_enter(&md->pvh_lock);
1499 
1500 	/* preserve other bits */
1501 	res = md->pvh_attrs & (set | clear);
1502 	md->pvh_attrs ^= res;
1503 
1504 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1505 		pmap_t pmap = pve->pv_pmap;
1506 		vaddr_t va = pve->pv_va & PV_VAMASK;
1507 		volatile pt_entry_t *pde;
1508 		pt_entry_t opte, pte;
1509 
1510 		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1511 			opte = pte = pmap_pte_get(pde, va);
1512 #ifdef PMAPDEBUG
1513 			if (!pte) {
1514 				DPRINTF(PDB_FOLLOW|PDB_BITS,
1515 				    ("%s: zero pte for 0x%lx\n", __func__,
1516 				    va));
1517 				continue;
1518 			}
1519 #endif
1520 			pte &= ~clear;
1521 			pte |= set;
1522 
1523 			if (!(pve->pv_va & PV_KENTER)) {
1524 				md->pvh_attrs |= pmap_pvh_attrs(pte);
1525 				res |= pmap_pvh_attrs(opte);
1526 			}
1527 
1528 			if (opte != pte) {
1529 				pmap_pte_flush(pmap, va, opte);
1530 				pmap_pte_set(pde, va, pte);
1531 			}
1532 		}
1533 	}
1534 	mutex_exit(&md->pvh_lock);
1535 
1536 	return ((res & (clear | set)) != 0);
1537 }
1538 
1539 bool
1540 pmap_testbit(struct vm_page *pg, u_int bit)
1541 {
1542 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1543 	struct pv_entry *pve;
1544 	pt_entry_t pte;
1545 	int ret;
1546 
1547 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
1548 
1549 	mutex_enter(&md->pvh_lock);
1550 
1551 	for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
1552 	    pve = pve->pv_next) {
1553 		pmap_t pm = pve->pv_pmap;
1554 
1555 		pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK);
1556 		if (pve->pv_va & PV_KENTER)
1557 			continue;
1558 
1559 		md->pvh_attrs |= pmap_pvh_attrs(pte);
1560 	}
1561 	ret = ((md->pvh_attrs & bit) != 0);
1562 	mutex_exit(&md->pvh_lock);
1563 
1564 	return ret;
1565 }
1566 
1567 /*
1568  * pmap_extract(pmap, va, pap)
1569  *	fills in the physical address corresponding to the
1570  *	virtual address specified by pmap and va into the
1571  *	storage pointed to by pap and returns true if the
1572  *	virtual address is mapped. returns false if not mapped.
1573  */
1574 bool
1575 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1576 {
1577 	pt_entry_t pte;
1578 
1579 	DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("%s(%p, %lx)\n", __func__, pmap, va));
1580 
1581 	PMAP_LOCK(pmap);
1582 	pte = pmap_vp_find(pmap, va);
1583 	PMAP_UNLOCK(pmap);
1584 
1585 	if (pte) {
1586 		if (pap)
1587 			*pap = (pte & ~PGOFSET) | (va & PGOFSET);
1588 		return true;
1589 	}
1590 
1591 	return false;
1592 }
1593 
1594 
1595 /*
1596  * pmap_activate(lwp)
1597  *
1598  *	Activates the vmspace for the given LWP.
1599  *	This is not necessarily the current LWP.
1600  */
1601 void
1602 pmap_activate(struct lwp *l)
1603 {
1604 	struct proc *p = l->l_proc;
1605 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
1606 	pa_space_t space = pmap->pm_space;
1607 	struct pcb *pcb = lwp_getpcb(l);
1608 
1609 	/* space is cached for the copy{in,out}'s pleasure */
1610 	pcb->pcb_space = space;
1611 	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb));
1612 
1613 	if (p == curproc)
1614 		mtctl(pmap->pm_pid, CR_PIDR2);
1615 }
1616 
1617 
1618 static inline void
1619 pmap_flush_page(struct vm_page *pg, bool purge)
1620 {
1621 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1622 	struct pv_entry *pve;
1623 
1624 	DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge));
1625 
1626 	/* purge cache for all possible mappings for the pa */
1627 	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1628 		vaddr_t va = pve->pv_va & PV_VAMASK;
1629 		pa_space_t sp = pve->pv_pmap->pm_space;
1630 
1631 		if (purge)
1632 			pdcache(sp, va, PAGE_SIZE);
1633 		else
1634 			fdcache(sp, va, PAGE_SIZE);
1635 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1636     defined(HP8500_CPU) || defined(HP8600_CPU)
1637 		ficache(sp, va, PAGE_SIZE);
1638 		pdtlb(sp, va);
1639 		pitlb(sp, va);
1640 #endif
1641 	}
1642 }
1643 
1644 /*
1645  * pmap_zero_page(pa)
1646  *
1647  * Zeros the specified page.
1648  */
1649 void
1650 pmap_zero_page(paddr_t pa)
1651 {
1652 
1653 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa));
1654 
1655 	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
1656 
1657 	memset((void *)pa, 0, PAGE_SIZE);
1658 	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1659 
1660 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1661     defined(HP8500_CPU) || defined(HP8600_CPU)
1662 	ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1663 	pdtlb(HPPA_SID_KERNEL, pa);
1664 	pitlb(HPPA_SID_KERNEL, pa);
1665 #endif
1666 }
1667 
1668 /*
1669  * pmap_copy_page(src, dst)
1670  *
1671  * pmap_copy_page copies the source page to the destination page.
1672  */
1673 void
1674 pmap_copy_page(paddr_t spa, paddr_t dpa)
1675 {
1676 	struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa);
1677 
1678 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa));
1679 
1680 	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
1681 
1682 	pmap_flush_page(srcpg, false);
1683 
1684 	memcpy((void *)dpa, (void *)spa, PAGE_SIZE);
1685 
1686 	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1687 	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1688 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
1689     defined(HP8500_CPU) || defined(HP8600_CPU)
1690 	ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1691 	ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1692 	pdtlb(HPPA_SID_KERNEL, spa);
1693 	pdtlb(HPPA_SID_KERNEL, dpa);
1694 	pitlb(HPPA_SID_KERNEL, spa);
1695 	pitlb(HPPA_SID_KERNEL, dpa);
1696 #endif
1697 }
1698 
1699 void
1700 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1701 {
1702 	volatile pt_entry_t *pde;
1703 	pt_entry_t pte, opte;
1704 
1705 #ifdef PMAPDEBUG
1706 	int opmapdebug = pmapdebug;
1707 
1708 	/*
1709 	 * If we're being told to map page zero, we can't call printf() at all,
1710 	 * because doing so would lead to an infinite recursion on this call.
1711 	 * (printf requires page zero to be mapped).
1712 	 */
1713 	if (va == 0)
1714 		pmapdebug = 0;
1715 #endif /* PMAPDEBUG */
1716 
1717 	DPRINTF(PDB_FOLLOW|PDB_ENTER,
1718 	    ("%s(%lx, %lx, %x)\n", __func__, va, pa, prot));
1719 
1720 	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
1721 	    !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
1722 		panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
1723 	opte = pmap_pte_get(pde, va);
1724 	pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
1725 	    pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
1726 	if (pa >= HPPA_IOBEGIN || (flags & PMAP_NOCACHE))
1727 		pte |= PTE_PROT(TLB_UNCACHEABLE);
1728 	pmap_kernel()->pm_stats.wired_count++;
1729 	pmap_kernel()->pm_stats.resident_count++;
1730 	if (opte)
1731 		pmap_pte_flush(pmap_kernel(), va, opte);
1732 
1733 	if (pmap_initialized) {
1734 		struct vm_page *pg;
1735 
1736 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1737 		if (pg != NULL) {
1738 			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1739 
1740 			KASSERT(pa < HPPA_IOBEGIN);
1741 
1742 			struct pv_entry *pve;
1743 
1744 			pve = pmap_pv_alloc();
1745 			if (!pve)
1746 				panic("%s: no pv entries available",
1747 				    __func__);
1748 			DPRINTF(PDB_FOLLOW|PDB_ENTER,
1749 			    ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
1750 			    va, pa, pte));
1751 
1752 			mutex_enter(&md->pvh_lock);
1753 			if (pmap_check_alias(pg, va, pte))
1754 				pmap_page_remove_locked(pg);
1755 			pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
1756 			    PV_KENTER);
1757 			mutex_exit(&md->pvh_lock);
1758 		}
1759 	}
1760 	pmap_pte_set(pde, va, pte);
1761 
1762 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__));
1763 
1764 #ifdef PMAPDEBUG
1765 	pmapdebug = opmapdebug;
1766 #endif /* PMAPDEBUG */
1767 }
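/*
 * Editor's usage sketch (hypothetical kva/bus_pa, not compiled):
 * wiring a device page uncached into kernel VA and tearing the
 * mapping down again.
 */
#if 0
	pmap_kenter_pa(kva, bus_pa, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_NOCACHE);
	pmap_update(pmap_kernel());
	/* ... use the mapping ... */
	pmap_kremove(kva, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif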
1768 
1769 void
1770 pmap_kremove(vaddr_t va, vsize_t size)
1771 {
1772 	struct pv_entry *pve;
1773 	vaddr_t eva, pdemask;
1774 	volatile pt_entry_t *pde = NULL;
1775 	pt_entry_t pte;
1776 	struct vm_page *pg;
1777 	pmap_t pmap = pmap_kernel();
1778 #ifdef PMAPDEBUG
1779 	int opmapdebug = pmapdebug;
1780 
1781 	/*
1782 	 * If we're being told to unmap page zero, we can't call printf() at
1783 	 * all as printf requires page zero to be mapped.
1784 	 */
1785 	if (va == 0)
1786 		pmapdebug = 0;
1787 #endif /* PMAPDEBUG */
1788 
1789 	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1790 	    ("%s(%lx, %lx)\n", __func__, va, size));
1791 #ifdef PMAPDEBUG
1792 
1793 	/*
1794 	 * Don't allow the VA == PA mappings, apart from page zero, to be
1795 	 * removed. Page zero is given special treatment so that we get TLB
1796 	 * faults when the kernel tries to de-reference NULL or anything else
1797 	 * in the first page when it shouldn't.
1798 	 */
1799 	if (va != 0 && va < ptoa(physmem)) {
1800 		DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1801 		    ("%s(%lx, %lx): unmapping physmem\n", __func__, va,
1802 		    size));
1803 		pmapdebug = opmapdebug;
1804 		return;
1805 	}
1806 #endif
1807 
1808 	for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
1809 		if (pdemask != (va & PDE_MASK)) {
1810 			pdemask = va & PDE_MASK;
1811 			if (!(pde = pmap_pde_get(pmap->pm_pdir, va))) {
1812 				va = pdemask + PDE_SIZE - PAGE_SIZE;
1813 				continue;
1814 			}
1815 		}
1816 		if (!(pte = pmap_pte_get(pde, va))) {
1817 			DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1818 			    ("%s: unmapping unmapped 0x%lx\n", __func__,
1819 			    va));
1820 			continue;
1821 		}
1822 
1823 		pmap_pte_flush(pmap, va, pte);
1824 		pmap_pte_set(pde, va, 0);
1825 		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1826 			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1827 
1828 			mutex_enter(&md->pvh_lock);
1829 
1830 			pve = pmap_pv_remove(pg, pmap, va);
1831 
1832 			mutex_exit(&md->pvh_lock);
1833 			if (pve != NULL)
1834 				pmap_pv_free(pve);
1835 		}
1836 	}
1837 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
1838 
1839 #ifdef PMAPDEBUG
1840 	pmapdebug = opmapdebug;
1841 #endif /* PMAPDEBUG */
1842 }
1843 
1844 #if defined(USE_HPT)
1845 #if defined(DDB)
1846 /*
1847  * prints whole va->pa (aka HPT or HVT)
1848  */
1849 void
1850 pmap_hptdump(void)
1851 {
1852 	struct hpt_entry *hpt, *ehpt;
1853 
1854 	hpt = (struct hpt_entry *)pmap_hpt;
1855 	ehpt = (struct hpt_entry *)((int)hpt + pmap_hptsize);
1856 	db_printf("HPT dump %p-%p:\n", hpt, ehpt);
1857 	for (; hpt < ehpt; hpt++)
1858 		if (hpt->hpt_valid) {
1859 			char buf[128];
1860 
1861 			snprintb(buf, sizeof(buf), TLB_BITS, hpt->hpt_tlbprot);
1862 
1863 			db_printf("hpt@%p: %x{%sv=%x:%x},%s,%x\n",
1864 			    hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
1865 			    hpt->hpt_space, hpt->hpt_vpn << 9,
1866 			    buf, tlbptob(hpt->hpt_tlbpage));
1867 		}
1868 }
1869 #endif
1870 #endif
1871